author		Steve French <sfrench@us.ibm.com>	2008-04-28 04:01:34 +0000
committer	Steve French <sfrench@us.ibm.com>	2008-04-28 04:01:34 +0000
commit		1dbbb6077426f8ce63d6a59c5ac6613e1689cbde (patch)
tree		6141d4d7a8eb7c557705bdfa764137d4fd2e4924
parent		d09e860cf07e7c9ee12920a09f5080e30a12a23a (diff)
parent		064922a805ec7aadfafdd27aa6b4908d737c3c1d (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
-rw-r--r--  Documentation/00-INDEX | 2
-rw-r--r--  Documentation/i386/boot.txt | 26
-rw-r--r--  Documentation/ia64/kvm.txt | 82
-rw-r--r--  Documentation/ide/ide-tape.txt | 211
-rw-r--r--  Documentation/ide/ide.txt | 107
-rw-r--r--  Documentation/ioctl-number.txt | 2
-rw-r--r--  Documentation/kernel-parameters.txt | 4
-rw-r--r--  Documentation/mips/AU1xxx_IDE.README | 46
-rw-r--r--  Documentation/powerpc/kvm_440.txt | 41
-rw-r--r--  Documentation/s390/kvm.txt | 125
-rw-r--r--  Documentation/smart-config.txt | 98
-rw-r--r--  MAINTAINERS | 17
-rw-r--r--  arch/ia64/Kconfig | 3
-rw-r--r--  arch/ia64/Makefile | 1
-rw-r--r--  arch/ia64/kvm/Kconfig | 49
-rw-r--r--  arch/ia64/kvm/Makefile | 61
-rw-r--r--  arch/ia64/kvm/asm-offsets.c | 251
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 1806
-rw-r--r--  arch/ia64/kvm/kvm_fw.c | 500
-rw-r--r--  arch/ia64/kvm/kvm_minstate.h | 273
-rw-r--r--  arch/ia64/kvm/lapic.h | 25
-rw-r--r--  arch/ia64/kvm/misc.h | 93
-rw-r--r--  arch/ia64/kvm/mmio.c | 341
-rw-r--r--  arch/ia64/kvm/optvfault.S | 918
-rw-r--r--  arch/ia64/kvm/process.c | 970
-rw-r--r--  arch/ia64/kvm/trampoline.S | 1038
-rw-r--r--  arch/ia64/kvm/vcpu.c | 2163
-rw-r--r--  arch/ia64/kvm/vcpu.h | 740
-rw-r--r--  arch/ia64/kvm/vmm.c | 66
-rw-r--r--  arch/ia64/kvm/vmm_ivt.S | 1424
-rw-r--r--  arch/ia64/kvm/vti.h | 290
-rw-r--r--  arch/ia64/kvm/vtlb.c | 636
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/Kconfig.debug | 3
-rw-r--r--  arch/powerpc/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 28
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 224
-rw-r--r--  arch/powerpc/kvm/44x_tlb.h | 91
-rw-r--r--  arch/powerpc/kvm/Kconfig | 42
-rw-r--r--  arch/powerpc/kvm/Makefile | 15
-rw-r--r--  arch/powerpc/kvm/booke_guest.c | 615
-rw-r--r--  arch/powerpc/kvm/booke_host.c | 83
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S | 436
-rw-r--r--  arch/powerpc/kvm/emulate.c | 760
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 436
-rw-r--r--  arch/s390/Kconfig | 14
-rw-r--r--  arch/s390/Makefile | 2
-rw-r--r--  arch/s390/kernel/early.c | 4
-rw-r--r--  arch/s390/kernel/setup.c | 14
-rw-r--r--  arch/s390/kernel/vtime.c | 1
-rw-r--r--  arch/s390/kvm/Kconfig | 46
-rw-r--r--  arch/s390/kvm/Makefile | 14
-rw-r--r--  arch/s390/kvm/diag.c | 67
-rw-r--r--  arch/s390/kvm/gaccess.h | 274
-rw-r--r--  arch/s390/kvm/intercept.c | 216
-rw-r--r--  arch/s390/kvm/interrupt.c | 592
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 685
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 64
-rw-r--r--  arch/s390/kvm/priv.c | 323
-rw-r--r--  arch/s390/kvm/sie64a.S | 47
-rw-r--r--  arch/s390/kvm/sigp.c | 288
-rw-r--r--  arch/s390/mm/pgtable.c | 65
-rw-r--r--  arch/um/Kconfig.x86_64 | 7
-rw-r--r--  arch/um/os-Linux/helper.c | 1
-rw-r--r--  arch/um/sys-i386/Makefile | 2
-rw-r--r--  arch/um/sys-x86_64/Makefile | 2
-rw-r--r--  arch/x86/Kconfig | 26
-rw-r--r--  arch/x86/Kconfig.cpu | 11
-rw-r--r--  arch/x86/Kconfig.debug | 13
-rw-r--r--  arch/x86/boot/header.S | 6
-rw-r--r--  arch/x86/configs/i386_defconfig | 1
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 1
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 10
-rw-r--r--  arch/x86/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86/kernel/Makefile | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 4
-rw-r--r--  arch/x86/kernel/apic_32.c | 3
-rw-r--r--  arch/x86/kernel/apic_64.c | 3
-rw-r--r--  arch/x86/kernel/apm_32.c | 3
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 1
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c | 18
-rw-r--r--  arch/x86/kernel/cpu/nexgen.c | 59
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 14
-rw-r--r--  arch/x86/kernel/crash.c | 3
-rw-r--r--  arch/x86/kernel/e820_64.c | 35
-rw-r--r--  arch/x86/kernel/genapic_64.c | 2
-rw-r--r--  arch/x86/kernel/head64.c | 25
-rw-r--r--  arch/x86/kernel/hpet.c | 2
-rw-r--r--  arch/x86/kernel/i8253.c | 6
-rw-r--r--  arch/x86/kernel/io_apic_32.c | 2
-rw-r--r--  arch/x86/kernel/io_apic_64.c | 2
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/kdebugfs.c | 163
-rw-r--r--  arch/x86/kernel/kvm.c | 248
-rw-r--r--  arch/x86/kernel/kvmclock.c | 187
-rw-r--r--  arch/x86/kernel/mfgpt_32.c | 3
-rw-r--r--  arch/x86/kernel/mpparse.c | 39
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 1
-rw-r--r--  arch/x86/kernel/process.c | 117
-rw-r--r--  arch/x86/kernel/process_32.c | 118
-rw-r--r--  arch/x86/kernel/process_64.c | 123
-rw-r--r--  arch/x86/kernel/ptrace.c | 95
-rw-r--r--  arch/x86/kernel/reboot.c | 13
-rw-r--r--  arch/x86/kernel/setup_32.c | 10
-rw-r--r--  arch/x86/kernel/setup_64.c | 36
-rw-r--r--  arch/x86/kernel/signal_32.c | 35
-rw-r--r--  arch/x86/kernel/signal_64.c | 30
-rw-r--r--  arch/x86/kernel/smpboot.c | 4
-rw-r--r--  arch/x86/kernel/summit_32.c | 5
-rw-r--r--  arch/x86/kernel/tlb_64.c | 4
-rw-r--r--  arch/x86/kernel/trampoline_32.S | 2
-rw-r--r--  arch/x86/kernel/traps_32.c | 2
-rw-r--r--  arch/x86/kvm/Kconfig | 13
-rw-r--r--  arch/x86/kvm/Makefile | 6
-rw-r--r--  arch/x86/kvm/i8254.c | 611
-rw-r--r--  arch/x86/kvm/i8254.h | 63
-rw-r--r--  arch/x86/kvm/irq.c | 18
-rw-r--r--  arch/x86/kvm/irq.h | 3
-rw-r--r--  arch/x86/kvm/kvm_svm.h | 2
-rw-r--r--  arch/x86/kvm/lapic.c | 35
-rw-r--r--  arch/x86/kvm/mmu.c | 672
-rw-r--r--  arch/x86/kvm/mmu.h | 6
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 86
-rw-r--r--  arch/x86/kvm/segment_descriptor.h | 29
-rw-r--r--  arch/x86/kvm/svm.c | 352
-rw-r--r--  arch/x86/kvm/svm.h | 3
-rw-r--r--  arch/x86/kvm/tss.h | 59
-rw-r--r--  arch/x86/kvm/vmx.c | 278
-rw-r--r--  arch/x86/kvm/vmx.h | 10
-rw-r--r--  arch/x86/kvm/x86.c | 897
-rw-r--r--  arch/x86/kvm/x86_emulate.c | 285
-rw-r--r--  arch/x86/lib/Makefile | 3
-rw-r--r--  arch/x86/lib/bitops_32.c | 70
-rw-r--r--  arch/x86/lib/bitops_64.c | 175
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 10
-rw-r--r--  arch/x86/mm/init_32.c | 6
-rw-r--r--  arch/x86/mm/init_64.c | 38
-rw-r--r--  arch/x86/mm/numa_64.c | 42
-rw-r--r--  arch/x86/mm/pat.c | 33
-rw-r--r--  arch/x86/xen/smp.c | 2
-rw-r--r--  block/bsg.c | 43
-rw-r--r--  drivers/acpi/processor_idle.c | 19
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 3
-rw-r--r--  drivers/char/agp/frontend.c | 4
-rw-r--r--  drivers/char/drm/ati_pcigart.c | 7
-rw-r--r--  drivers/char/drm/drm.h | 17
-rw-r--r--  drivers/char/drm/drmP.h | 133
-rw-r--r--  drivers/char/drm/drm_agpsupport.c | 2
-rw-r--r--  drivers/char/drm/drm_drv.c | 60
-rw-r--r--  drivers/char/drm/drm_fops.c | 41
-rw-r--r--  drivers/char/drm/drm_irq.c | 381
-rw-r--r--  drivers/char/drm/drm_proc.c | 61
-rw-r--r--  drivers/char/drm/drm_stub.c | 138
-rw-r--r--  drivers/char/drm/drm_sysfs.c | 46
-rw-r--r--  drivers/char/drm/drm_vm.c | 22
-rw-r--r--  drivers/char/drm/i810_dma.c | 4
-rw-r--r--  drivers/char/drm/i830_dma.c | 4
-rw-r--r--  drivers/char/drm/i915_dma.c | 160
-rw-r--r--  drivers/char/drm/i915_drm.h | 45
-rw-r--r--  drivers/char/drm/i915_drv.c | 8
-rw-r--r--  drivers/char/drm/i915_drv.h | 103
-rw-r--r--  drivers/char/drm/i915_irq.c | 605
-rw-r--r--  drivers/char/drm/mga_drv.c | 7
-rw-r--r--  drivers/char/drm/mga_drv.h | 6
-rw-r--r--  drivers/char/drm/mga_irq.c | 69
-rw-r--r--  drivers/char/drm/r128_drv.c | 7
-rw-r--r--  drivers/char/drm/r128_drv.h | 9
-rw-r--r--  drivers/char/drm/r128_irq.c | 55
-rw-r--r--  drivers/char/drm/radeon_drv.c | 8
-rw-r--r--  drivers/char/drm/radeon_drv.h | 19
-rw-r--r--  drivers/char/drm/radeon_irq.c | 171
-rw-r--r--  drivers/char/drm/via_drv.c | 6
-rw-r--r--  drivers/char/drm/via_drv.h | 7
-rw-r--r--  drivers/char/drm/via_irq.c | 81
-rw-r--r--  drivers/ide/Kconfig | 2
-rw-r--r--  drivers/ide/arm/bast-ide.c | 25
-rw-r--r--  drivers/ide/arm/icside.c | 69
-rw-r--r--  drivers/ide/arm/ide_arm.c | 20
-rw-r--r--  drivers/ide/arm/palm_bk3710.c | 64
-rw-r--r--  drivers/ide/arm/rapide.c | 11
-rw-r--r--  drivers/ide/cris/ide-cris.c | 53
-rw-r--r--  drivers/ide/h8300/ide-h8300.c | 10
-rw-r--r--  drivers/ide/ide-acpi.c | 30
-rw-r--r--  drivers/ide/ide-cd.c | 917
-rw-r--r--  drivers/ide/ide-cd.h | 4
-rw-r--r--  drivers/ide/ide-disk.c | 159
-rw-r--r--  drivers/ide/ide-dma.c | 153
-rw-r--r--  drivers/ide/ide-floppy.c | 34
-rw-r--r--  drivers/ide/ide-generic.c | 36
-rw-r--r--  drivers/ide/ide-io.c | 59
-rw-r--r--  drivers/ide/ide-iops.c | 110
-rw-r--r--  drivers/ide/ide-lib.c | 44
-rw-r--r--  drivers/ide/ide-pnp.c | 45
-rw-r--r--  drivers/ide/ide-probe.c | 285
-rw-r--r--  drivers/ide/ide-proc.c | 169
-rw-r--r--  drivers/ide/ide-scan-pci.c | 2
-rw-r--r--  drivers/ide/ide-tape.c | 1204
-rw-r--r--  drivers/ide/ide-taskfile.c | 48
-rw-r--r--  drivers/ide/ide.c | 491
-rw-r--r--  drivers/ide/legacy/ali14xx.c | 44
-rw-r--r--  drivers/ide/legacy/buddha.c | 18
-rw-r--r--  drivers/ide/legacy/dtc2278.c | 39
-rw-r--r--  drivers/ide/legacy/falconide.c | 14
-rw-r--r--  drivers/ide/legacy/gayle.c | 22
-rw-r--r--  drivers/ide/legacy/hd.c | 78
-rw-r--r--  drivers/ide/legacy/ht6560b.c | 57
-rw-r--r--  drivers/ide/legacy/ide-4drives.c | 52
-rw-r--r--  drivers/ide/legacy/ide-cs.c | 84
-rw-r--r--  drivers/ide/legacy/ide_platform.c | 16
-rw-r--r--  drivers/ide/legacy/macide.c | 8
-rw-r--r--  drivers/ide/legacy/q40ide.c | 9
-rw-r--r--  drivers/ide/legacy/qd65xx.c | 238
-rw-r--r--  drivers/ide/legacy/qd65xx.h | 1
-rw-r--r--  drivers/ide/legacy/umc8672.c | 92
-rw-r--r--  drivers/ide/mips/au1xxx-ide.c | 130
-rw-r--r--  drivers/ide/mips/swarm.c | 19
-rw-r--r--  drivers/ide/pci/aec62xx.c | 39
-rw-r--r--  drivers/ide/pci/alim15x3.c | 332
-rw-r--r--  drivers/ide/pci/amd74xx.c | 19
-rw-r--r--  drivers/ide/pci/atiixp.c | 29
-rw-r--r--  drivers/ide/pci/cmd640.c | 294
-rw-r--r--  drivers/ide/pci/cmd64x.c | 153
-rw-r--r--  drivers/ide/pci/cs5520.c | 29
-rw-r--r--  drivers/ide/pci/cs5530.c | 18
-rw-r--r--  drivers/ide/pci/cs5535.c | 24
-rw-r--r--  drivers/ide/pci/cy82c693.c | 97
-rw-r--r--  drivers/ide/pci/delkin_cb.c | 20
-rw-r--r--  drivers/ide/pci/generic.c | 10
-rw-r--r--  drivers/ide/pci/hpt34x.c | 17
-rw-r--r--  drivers/ide/pci/hpt366.c | 132
-rw-r--r--  drivers/ide/pci/it8213.c | 34
-rw-r--r--  drivers/ide/pci/it821x.c | 52
-rw-r--r--  drivers/ide/pci/jmicron.c | 29
-rw-r--r--  drivers/ide/pci/ns87415.c | 40
-rw-r--r--  drivers/ide/pci/opti621.c | 82
-rw-r--r--  drivers/ide/pci/pdc202xx_new.c | 23
-rw-r--r--  drivers/ide/pci/pdc202xx_old.c | 126
-rw-r--r--  drivers/ide/pci/piix.c | 17
-rw-r--r--  drivers/ide/pci/rz1000.c | 2
-rw-r--r--  drivers/ide/pci/sc1200.c | 39
-rw-r--r--  drivers/ide/pci/scc_pata.c | 95
-rw-r--r--  drivers/ide/pci/serverworks.c | 38
-rw-r--r--  drivers/ide/pci/sgiioc4.c | 140
-rw-r--r--  drivers/ide/pci/siimage.c | 142
-rw-r--r--  drivers/ide/pci/sis5513.c | 253
-rw-r--r--  drivers/ide/pci/sl82c105.c | 83
-rw-r--r--  drivers/ide/pci/slc90e66.c | 22
-rw-r--r--  drivers/ide/pci/tc86c001.c | 54
-rw-r--r--  drivers/ide/pci/triflex.c | 12
-rw-r--r--  drivers/ide/pci/trm290.c | 47
-rw-r--r--  drivers/ide/pci/via82cxxx.c | 20
-rw-r--r--  drivers/ide/ppc/mpc8xx.c | 70
-rw-r--r--  drivers/ide/ppc/pmac.c | 183
-rw-r--r--  drivers/ide/setup-pci.c | 226
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h | 1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c | 75
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c | 16
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 15
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c | 51
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c | 6
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_if.c | 23
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/doorbell.c | 122
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 33
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 6
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 15
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 27
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 20
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 18
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c | 4
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 20
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 125
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 19
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 1
-rw-r--r--  drivers/input/joystick/xpad.c | 34
-rw-r--r--  drivers/macintosh/mac_hid.c | 4
-rw-r--r--  drivers/media/dvb/dvb-usb/dib0700_devices.c | 4
-rw-r--r--  drivers/media/dvb/frontends/Kconfig | 8
-rw-r--r--  drivers/media/dvb/frontends/Makefile | 1
-rw-r--r--  drivers/media/dvb/frontends/mt312.h | 2
-rw-r--r--  drivers/media/dvb/frontends/s5h1411.c | 888
-rw-r--r--  drivers/media/dvb/frontends/s5h1411.h | 90
-rw-r--r--  drivers/media/video/au0828/Kconfig | 2
-rw-r--r--  drivers/media/video/au0828/au0828-cards.c | 1
-rw-r--r--  drivers/media/video/au0828/au0828-core.c | 26
-rw-r--r--  drivers/media/video/au0828/au0828-dvb.c | 2
-rw-r--r--  drivers/media/video/au0828/au0828-i2c.c | 6
-rw-r--r--  drivers/media/video/au0828/au0828.h | 8
-rw-r--r--  drivers/media/video/cx23885/cx23885-dvb.c | 4
-rw-r--r--  drivers/media/video/cx88/Kconfig | 1
-rw-r--r--  drivers/media/video/cx88/cx88-blackbird.c | 6
-rw-r--r--  drivers/media/video/cx88/cx88-cards.c | 1
-rw-r--r--  drivers/media/video/cx88/cx88-dvb.c | 32
-rw-r--r--  drivers/media/video/em28xx/em28xx-core.c | 2
-rw-r--r--  drivers/media/video/ir-kbd-i2c.c | 21
-rw-r--r--  drivers/media/video/pvrusb2/Kconfig | 1
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-devattr.c | 28
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-devattr.h | 22
-rw-r--r--  drivers/media/video/tuner-core.c | 92
-rw-r--r--  drivers/media/video/tuner-xc2028.c | 2
-rw-r--r--  drivers/media/video/vivi.c | 2
-rw-r--r--  drivers/misc/enclosure.c | 100
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/mlx4/alloc.c | 157
-rw-r--r--  drivers/net/mlx4/cq.c | 2
-rw-r--r--  drivers/net/mlx4/main.c | 3
-rw-r--r--  drivers/net/mlx4/mlx4.h | 3
-rw-r--r--  drivers/net/mlx4/qp.c | 31
-rw-r--r--  drivers/s390/Makefile | 2
-rw-r--r--  drivers/s390/kvm/Makefile | 9
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 338
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 39
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 18
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 114
-rw-r--r--  drivers/scsi/FlashPoint.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 10
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/aha152x.c | 7
-rw-r--r--  drivers/scsi/aha1542.c | 26
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.h | 23
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.reg | 115
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 835
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_inline.h | 859
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 181
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 177
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 33
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 8
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_proc.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_reg.h_shipped | 1145
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped | 1555
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_seq.h_shipped | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.h | 55
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.reg | 45
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_93cx6.c | 16
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 676
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_inline.h | 616
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 95
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 142
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 73
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_pci.c | 9
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_proc.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped | 233
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped | 6
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_gram.y | 105
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_scan.l | 19
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c | 25
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h | 1
-rw-r--r--  drivers/scsi/eata.c | 11
-rw-r--r--  drivers/scsi/esp_scsi.c | 35
-rw-r--r--  drivers/scsi/esp_scsi.h | 13
-rw-r--r--  drivers/scsi/hosts.c | 29
-rw-r--r--  drivers/scsi/ide-scsi.c | 19
-rw-r--r--  drivers/scsi/jazz_esp.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 10
-rw-r--r--  drivers/scsi/mac_esp.c | 657
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 394
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 26
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 19
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/scsi_priv.h | 1
-rw-r--r--  drivers/scsi/scsi_proc.c | 7
-rw-r--r--  drivers/scsi/scsi_scan.c | 84
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 142
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 60
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 22
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 33
-rw-r--r--  drivers/scsi/sgiwd93.c | 4
-rw-r--r--  drivers/scsi/sni_53c710.c | 2
-rw-r--r--  drivers/scsi/st.c | 10
-rw-r--r--  drivers/scsi/sun3x_esp.c | 2
-rw-r--r--  drivers/scsi/u14-34f.c | 9
-rw-r--r--  fs/9p/vfs_super.c | 7
-rw-r--r--  fs/binfmt_elf.c | 23
-rw-r--r--  fs/binfmt_misc.c | 18
-rw-r--r--  fs/binfmt_som.c | 10
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 29
-rw-r--r--  fs/cifs/cifsfs.c | 10
-rw-r--r--  fs/cifs/cifsproto.h | 8
-rw-r--r--  fs/exec.c | 28
-rw-r--r--  fs/fcntl.c | 40
-rw-r--r--  fs/fuse/inode.c | 5
-rw-r--r--  fs/locks.c | 1
-rw-r--r--  fs/namespace.c | 9
-rw-r--r--  fs/nfs/super.c | 8
-rw-r--r--  fs/sysfs/file.c | 14
-rw-r--r--  fs/sysfs/group.c | 83
-rw-r--r--  fs/sysfs/sysfs.h | 2
-rw-r--r--  include/asm-alpha/bitops.h | 5
-rw-r--r--  include/asm-arm/arch-sa1100/ide.h | 6
-rw-r--r--  include/asm-cris/arch-v10/ide.h | 11
-rw-r--r--  include/asm-generic/bitops/__fls.h | 43
-rw-r--r--  include/asm-generic/bitops/find.h | 2
-rw-r--r--  include/asm-generic/bitops/fls64.h | 22
-rw-r--r--  include/asm-ia64/bitops.h | 16
-rw-r--r--  include/asm-ia64/gcc_intrin.h | 12
-rw-r--r--  include/asm-ia64/kvm.h | 205
-rw-r--r--  include/asm-ia64/kvm_host.h | 524
-rw-r--r--  include/asm-ia64/kvm_para.h | 29
-rw-r--r--  include/asm-ia64/processor.h | 63
-rw-r--r--  include/asm-mips/bitops.h | 5
-rw-r--r--  include/asm-mips/mach-au1x00/au1xxx_ide.h | 42
-rw-r--r--  include/asm-parisc/bitops.h | 1
-rw-r--r--  include/asm-powerpc/bitops.h | 5
-rw-r--r--  include/asm-powerpc/kvm.h | 53
-rw-r--r--  include/asm-powerpc/kvm_asm.h | 55
-rw-r--r--  include/asm-powerpc/kvm_host.h | 152
-rw-r--r--  include/asm-powerpc/kvm_para.h | 37
-rw-r--r--  include/asm-powerpc/kvm_ppc.h | 88
-rw-r--r--  include/asm-powerpc/mmu-44x.h | 2
-rw-r--r--  include/asm-s390/Kbuild | 1
-rw-r--r--  include/asm-s390/bitops.h | 1
-rw-r--r--  include/asm-s390/kvm.h | 41
-rw-r--r--  include/asm-s390/kvm_host.h | 234
-rw-r--r--  include/asm-s390/kvm_para.h | 150
-rw-r--r--  include/asm-s390/kvm_virtio.h | 53
-rw-r--r--  include/asm-s390/lowcore.h | 15
-rw-r--r--  include/asm-s390/mmu.h | 1
-rw-r--r--  include/asm-s390/mmu_context.h | 8
-rw-r--r--  include/asm-s390/pgtable.h | 93
-rw-r--r--  include/asm-s390/setup.h | 1
-rw-r--r--  include/asm-sh/bitops.h | 1
-rw-r--r--  include/asm-sparc64/bitops.h | 1
-rw-r--r--  include/asm-x86/bios_ebda.h | 2
-rw-r--r--  include/asm-x86/bitops.h | 149
-rw-r--r--  include/asm-x86/bitops_32.h | 166
-rw-r--r--  include/asm-x86/bitops_64.h | 162
-rw-r--r--  include/asm-x86/bootparam.h | 14
-rw-r--r--  include/asm-x86/e820_64.h | 3
-rw-r--r--  include/asm-x86/io_apic.h | 6
-rw-r--r--  include/asm-x86/kvm.h | 41
-rw-r--r--  include/asm-x86/kvm_host.h | 99
-rw-r--r--  include/asm-x86/kvm_para.h | 55
-rw-r--r--  include/asm-x86/mach-default/smpboot_hooks.h | 2
-rw-r--r--  include/asm-x86/pgtable_32.h | 8
-rw-r--r--  include/asm-x86/posix_types.h | 8
-rw-r--r--  include/asm-x86/processor.h | 2
-rw-r--r--  include/asm-x86/ptrace.h | 2
-rw-r--r--  include/asm-x86/reboot.h | 2
-rw-r--r--  include/asm-x86/rio.h | 11
-rw-r--r--  include/asm-x86/unistd.h | 8
-rw-r--r--  include/linux/Kbuild | 1
-rw-r--r--  include/linux/bitops.h | 140
-rw-r--r--  include/linux/bsg.h | 14
-rw-r--r--  include/linux/compiler-gcc.h | 13
-rw-r--r--  include/linux/file.h | 3
-rw-r--r--  include/linux/fs.h | 5
-rw-r--r--  include/linux/hdsmart.h | 126
-rw-r--r--  include/linux/ide.h | 227
-rw-r--r--  include/linux/kvm.h | 130
-rw-r--r--  include/linux/kvm_host.h | 59
-rw-r--r--  include/linux/kvm_para.h | 11
-rw-r--r--  include/linux/kvm_types.h | 2
-rw-r--r--  include/linux/mlx4/device.h | 40
-rw-r--r--  include/linux/mlx4/qp.h | 4
-rw-r--r--  include/linux/mm.h | 1
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  include/linux/sysfs.h | 4
-rw-r--r--  include/scsi/scsi_device.h | 3
-rw-r--r--  kernel/exit.c | 6
-rw-r--r--  kernel/fork.c | 60
-rw-r--r--  lib/Kconfig | 6
-rw-r--r--  lib/Makefile | 1
-rw-r--r--  lib/find_next_bit.c | 77
-rw-r--r--  mm/bootmem.c | 164
-rw-r--r--  mm/rmap.c | 7
-rw-r--r--  mm/sparse.c | 37
-rw-r--r--  net/mac80211/mesh.h | 1
-rw-r--r--  net/mac80211/mesh_hwmp.c | 1
-rw-r--r--  scripts/Makefile.modpost | 2
-rw-r--r--  scripts/mod/modpost.c | 8
-rw-r--r--  virt/kvm/kvm_main.c | 230
-rw-r--r--  virt/kvm/kvm_trace.c | 276
487 files changed, 36118 insertions, 13580 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index a82a113b4a4..1977fab3865 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -329,8 +329,6 @@ sgi-visws.txt
- short blurb on the SGI Visual Workstations.
sh/
- directory with info on porting Linux to a new architecture.
-smart-config.txt
- - description of the Smart Config makefile feature.
sound/
- directory with info on sound card support.
sparc/
diff --git a/Documentation/i386/boot.txt b/Documentation/i386/boot.txt
index 2eb16100bb3..0fac3465f2e 100644
--- a/Documentation/i386/boot.txt
+++ b/Documentation/i386/boot.txt
@@ -42,6 +42,8 @@ Protocol 2.05: (Kernel 2.6.20) Make protected mode kernel relocatable.
Protocol 2.06: (Kernel 2.6.22) Added a field that contains the size of
the boot command line
+Protocol 2.09:	(Kernel 2.6.26) Added a field of 64-bit physical
+		pointer to a singly linked list of struct setup_data.
**** MEMORY LAYOUT
@@ -172,6 +174,8 @@ Offset Proto Name Meaning
0240/8 2.07+ hardware_subarch_data Subarchitecture-specific data
0248/4 2.08+ payload_offset Offset of kernel payload
024C/4 2.08+ payload_length Length of kernel payload
+0250/8 2.09+ setup_data 64-bit physical pointer to linked list
+ of struct setup_data
(1) For backwards compatibility, if the setup_sects field contains 0, the
real value is 4.
@@ -572,6 +576,28 @@ command line is entered using the following protocol:
covered by setup_move_size, so you may need to adjust this
field.
+Field name: setup_data
+Type: write (obligatory)
+Offset/size: 0x250/8
+Protocol: 2.09+
+
+  The 64-bit physical pointer to a NULL-terminated singly linked list of
+  struct setup_data. This is used to define a more extensible boot
+  parameters passing mechanism. The definition of struct setup_data is
+  as follows:
+
+ struct setup_data {
+ u64 next;
+ u32 type;
+ u32 len;
+ u8 data[0];
+ };
+
+  Here, next is a 64-bit physical pointer to the next node of the
+  linked list (the next field of the last node is 0); type identifies
+  the contents of data; len is the length of the data field; and data
+  holds the real payload.
+
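+  As an illustration, a boot loader or early kernel code could walk the
+  list roughly as follows (a hypothetical sketch: boot_params and
+  handle_setup_data() are stand-ins for whatever the consumer provides,
+  and the casts assume the physical addresses are directly addressable):
+
+	struct setup_data *sd;
+
+	/* boot_params.setup_data holds the 64-bit physical pointer */
+	for (sd = (struct setup_data *)boot_params.setup_data;
+	     sd != NULL;
+	     sd = (struct setup_data *)sd->next) {
+		/* type identifies the payload carried in data[] */
+		handle_setup_data(sd->type, sd->data, sd->len);
+	}
+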
**** MEMORY LAYOUT OF THE REAL-MODE CODE
diff --git a/Documentation/ia64/kvm.txt b/Documentation/ia64/kvm.txt
new file mode 100644
index 00000000000..bec9d815da3
--- /dev/null
+++ b/Documentation/ia64/kvm.txt
@@ -0,0 +1,82 @@
+Currently, the kvm module is in the EXPERIMENTAL stage on IA64. This means
+that its interfaces are not yet stable enough to rely on, so please do not
+run critical applications in a virtual machine. We will try our best to make
+it more robust in future versions!
+ Guide: How to boot up guests on kvm/ia64
+
+This guide is to describe how to enable kvm support for IA-64 systems.
+
+1. Get the kvm source from git.kernel.org.
+ Userspace source:
+ git clone git://git.kernel.org/pub/scm/virt/kvm/kvm-userspace.git
+ Kernel Source:
+ git clone git://git.kernel.org/pub/scm/linux/kernel/git/xiantao/kvm-ia64.git
+
+2. Compile the source code.
+ 2.1 Compile userspace code:
+	(1) cd ./kvm-userspace
+	(2) ./configure
+	(3) cd kernel
+	(4) make sync LINUX=$kernel_dir (kernel_dir is the directory of the kernel source)
+	(5) cd ..
+	(6) make qemu
+	(7) cd qemu; make install
+
+	2.2 Compile kernel source code:
+	(1) cd ./$kernel_dir
+	(2) make menuconfig
+	(3) Enter the virtualization option, and choose kvm.
+	(4) make
+	(5) Once (4) is done, make modules_install
+	(6) Make an initrd, and reboot the host machine with the new kernel.
+	(7) Once (6) is done, cd $kernel_dir/arch/ia64/kvm
+	(8) insmod kvm.ko; insmod kvm-intel.ko
+
+Note: For step 2, please make sure that the host page size == TARGET_PAGE_SIZE of qemu; otherwise, it may fail.
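+
+For convenience, the whole of step 2 might look like the following shell
+sequence (an illustrative sketch; adjust the directories to your setup):
+
+	cd ./kvm-userspace
+	./configure
+	(cd kernel && make sync LINUX=$kernel_dir)
+	make qemu
+	(cd qemu && make install)
+	cd $kernel_dir
+	make menuconfig		# enable kvm under the virtualization option
+	make && make modules_install
+	insmod arch/ia64/kvm/kvm.ko
+	insmod arch/ia64/kvm/kvm-intel.ko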
+
+3. Get the guest firmware named Flash.fd, and put it in the right place:
+ (1) If you have the guest firmware (binary) released by Intel Corp for Xen, use it directly.
+
+	(2) If you have no firmware at hand, please download its source with
+	hg clone http://xenbits.xensource.com/ext/efi-vfirmware.hg
+	You can find the firmware's binary in the directory efi-vfirmware.hg/binaries.
+
+	(3) Rename the firmware you obtained to Flash.fd, and copy it to /usr/local/share/qemu
+
+4. Boot up Linux or Windows guests:
+	4.1 Create or install an image for the guest to boot. If you have xen experience, it should be easy.
+
+	4.2 Boot up guests using the following command:
+		/usr/local/bin/qemu-system-ia64 -smp xx -m 512 -hda $your_image
+	(xx is the number of virtual processors for the guest; the maximum value is currently 4)
+
+5. Known possible issues on some platforms with old firmware.
+
+If you meet strange host crash issues, try to solve them through either of the following ways:
+
+(1): Upgrade your firmware to the latest version.
+
+(2): Apply the patch below to the kernel source.
+diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
+index 0b53344..f02b0f7 100644
+--- a/arch/ia64/kernel/pal.S
++++ b/arch/ia64/kernel/pal.S
+@@ -84,7 +84,8 @@ GLOBAL_ENTRY(ia64_pal_call_static)
+ mov ar.pfs = loc1
+ mov rp = loc0
+ ;;
+- srlz.d // seralize restoration of psr.l
++ srlz.i // seralize restoration of psr.l
++ ;;
+ br.ret.sptk.many b0
+ END(ia64_pal_call_static)
+
+6. Bug report:
+	If you find any issues when using kvm/ia64, please post the bug info to the kvm-ia64-devel mailing list.
+ https://lists.sourceforge.net/lists/listinfo/kvm-ia64-devel/
+
+Thanks for your interest! Let's work together, and make kvm/ia64 stronger and stronger!
+
+
+ Xiantao Zhang <xiantao.zhang@intel.com>
+ 2008.3.10
diff --git a/Documentation/ide/ide-tape.txt b/Documentation/ide/ide-tape.txt
index 658f271a373..3f348a0b21d 100644
--- a/Documentation/ide/ide-tape.txt
+++ b/Documentation/ide/ide-tape.txt
@@ -1,146 +1,65 @@
-/*
- * IDE ATAPI streaming tape driver.
- *
- * This driver is a part of the Linux ide driver.
- *
- * The driver, in co-operation with ide.c, basically traverses the
- * request-list for the block device interface. The character device
- * interface, on the other hand, creates new requests, adds them
- * to the request-list of the block device, and waits for their completion.
- *
- * Pipelined operation mode is now supported on both reads and writes.
- *
- * The block device major and minor numbers are determined from the
- * tape's relative position in the ide interfaces, as explained in ide.c.
- *
- * The character device interface consists of the following devices:
- *
- * ht0 major 37, minor 0 first IDE tape, rewind on close.
- * ht1 major 37, minor 1 second IDE tape, rewind on close.
- * ...
- * nht0 major 37, minor 128 first IDE tape, no rewind on close.
- * nht1 major 37, minor 129 second IDE tape, no rewind on close.
- * ...
- *
- * The general magnetic tape commands compatible interface, as defined by
- * include/linux/mtio.h, is accessible through the character device.
- *
- * General ide driver configuration options, such as the interrupt-unmask
- * flag, can be configured by issuing an ioctl to the block device interface,
- * as any other ide device.
- *
- * Our own ide-tape ioctl's can be issued to either the block device or
- * the character device interface.
- *
- * Maximal throughput with minimal bus load will usually be achieved in the
- * following scenario:
- *
- * 1. ide-tape is operating in the pipelined operation mode.
- * 2. No buffering is performed by the user backup program.
- *
- * Testing was done with a 2 GB CONNER CTMA 4000 IDE ATAPI Streaming Tape Drive.
- *
- * Here are some words from the first releases of hd.c, which are quoted
- * in ide.c and apply here as well:
- *
- * | Special care is recommended. Have Fun!
- *
- *
- * An overview of the pipelined operation mode.
- *
- * In the pipelined write mode, we will usually just add requests to our
- * pipeline and return immediately, before we even start to service them. The
- * user program will then have enough time to prepare the next request while
- * we are still busy servicing previous requests. In the pipelined read mode,
- * the situation is similar - we add read-ahead requests into the pipeline,
- * before the user even requested them.
- *
- * The pipeline can be viewed as a "safety net" which will be activated when
- * the system load is high and prevents the user backup program from keeping up
- * with the current tape speed. At this point, the pipeline will get
- * shorter and shorter but the tape will still be streaming at the same speed.
- * Assuming we have enough pipeline stages, the system load will hopefully
- * decrease before the pipeline is completely empty, and the backup program
- * will be able to "catch up" and refill the pipeline again.
- *
- * When using the pipelined mode, it would be best to disable any type of
- * buffering done by the user program, as ide-tape already provides all the
- * benefits in the kernel, where it can be done in a more efficient way.
- * As we will usually not block the user program on a request, the most
- * efficient user code will then be a simple read-write-read-... cycle.
- * Any additional logic will usually just slow down the backup process.
- *
- * Using the pipelined mode, I get a constant over 400 KBps throughput,
- * which seems to be the maximum throughput supported by my tape.
- *
- * However, there are some downfalls:
- *
- * 1. We use memory (for data buffers) in proportional to the number
- * of pipeline stages (each stage is about 26 KB with my tape).
- * 2. In the pipelined write mode, we cheat and postpone error codes
- * to the user task. In read mode, the actual tape position
- * will be a bit further than the last requested block.
- *
- * Concerning (1):
- *
- * 1. We allocate stages dynamically only when we need them. When
- * we don't need them, we don't consume additional memory. In
- * case we can't allocate stages, we just manage without them
- * (at the expense of decreased throughput) so when Linux is
- * tight in memory, we will not pose additional difficulties.
- *
- * 2. The maximum number of stages (which is, in fact, the maximum
- * amount of memory) which we allocate is limited by the compile
- * time parameter IDETAPE_MAX_PIPELINE_STAGES.
- *
- * 3. The maximum number of stages is a controlled parameter - We
- * don't start from the user defined maximum number of stages
- * but from the lower IDETAPE_MIN_PIPELINE_STAGES (again, we
- * will not even allocate this amount of stages if the user
- * program can't handle the speed). We then implement a feedback
- * loop which checks if the pipeline is empty, and if it is, we
- * increase the maximum number of stages as necessary until we
- * reach the optimum value which just manages to keep the tape
- * busy with minimum allocated memory or until we reach
- * IDETAPE_MAX_PIPELINE_STAGES.
- *
- * Concerning (2):
- *
- * In pipelined write mode, ide-tape can not return accurate error codes
- * to the user program since we usually just add the request to the
- * pipeline without waiting for it to be serviced. In case an error
- * occurs, I will report it on the next user request.
- *
- * In the pipelined read mode, subsequent read requests or forward
- * filemark spacing will perform correctly, as we preserve all blocks
- * and filemarks which we encountered during our excess read-ahead.
- *
- * For accurate tape positioning and error reporting, disabling
- * pipelined mode might be the best option.
- *
- * You can enable/disable/tune the pipelined operation mode by adjusting
- * the compile time parameters below.
- *
- *
- * Possible improvements.
- *
- * 1. Support for the ATAPI overlap protocol.
- *
- * In order to maximize bus throughput, we currently use the DSC
- * overlap method which enables ide.c to service requests from the
- * other device while the tape is busy executing a command. The
- * DSC overlap method involves polling the tape's status register
- * for the DSC bit, and servicing the other device while the tape
- * isn't ready.
- *
- * In the current QIC development standard (December 1995),
- * it is recommended that new tape drives will *in addition*
- * implement the ATAPI overlap protocol, which is used for the
- * same purpose - efficient use of the IDE bus, but is interrupt
- * driven and thus has much less CPU overhead.
- *
- * ATAPI overlap is likely to be supported in most new ATAPI
- * devices, including new ATAPI cdroms, and thus provides us
- * a method by which we can achieve higher throughput when
- * sharing a (fast) ATA-2 disk with any (slow) new ATAPI device.
- */
+IDE ATAPI streaming tape driver.
+
+This driver is a part of the Linux ide driver.
+
+The driver, in co-operation with ide.c, basically traverses the
+request-list for the block device interface. The character device
+interface, on the other hand, creates new requests, adds them
+to the request-list of the block device, and waits for their completion.
+
+The block device major and minor numbers are determined from the
+tape's relative position in the ide interfaces, as explained in ide.c.
+
+The character device interface consists of the following devices:
+
+ht0 major 37, minor 0 first IDE tape, rewind on close.
+ht1 major 37, minor 1 second IDE tape, rewind on close.
+...
+nht0 major 37, minor 128 first IDE tape, no rewind on close.
+nht1 major 37, minor 129 second IDE tape, no rewind on close.
+...
+
+The general magnetic tape commands compatible interface, as defined by
+include/linux/mtio.h, is accessible through the character device.
+
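+For instance, a minimal sketch of rewinding the tape through the mtio
+interface might look like this (the device node and error handling are
+only illustrative):
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <sys/ioctl.h>
+	#include <sys/mtio.h>
+
+	int fd = open("/dev/ht0", O_RDONLY);	/* first IDE tape, rewind on close */
+	struct mtop op = { .mt_op = MTREW, .mt_count = 1 };	/* rewind command */
+
+	if (fd < 0 || ioctl(fd, MTIOCTOP, &op) < 0)
+		perror("ide-tape mtio");
+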
+General ide driver configuration options, such as the interrupt-unmask
+flag, can be configured by issuing an ioctl to the block device interface,
+as any other ide device.
+
+Our own ide-tape ioctl's can be issued to either the block device or
+the character device interface.
+
+Maximal throughput with minimal bus load will usually be achieved in the
+following scenario:
+
+ 1. ide-tape is operating in the pipelined operation mode.
+ 2. No buffering is performed by the user backup program.
+
+Testing was done with a 2 GB CONNER CTMA 4000 IDE ATAPI Streaming Tape Drive.
+
+Here are some words from the first releases of hd.c, which are quoted
+in ide.c and apply here as well:
+
+| Special care is recommended. Have Fun!
+
+Possible improvements:
+
+1. Support for the ATAPI overlap protocol.
+
+In order to maximize bus throughput, we currently use the DSC
+overlap method which enables ide.c to service requests from the
+other device while the tape is busy executing a command. The
+DSC overlap method involves polling the tape's status register
+for the DSC bit, and servicing the other device while the tape
+isn't ready.
+
+In the current QIC development standard (December 1995),
+it is recommended that new tape drives will *in addition*
+implement the ATAPI overlap protocol, which is used for the
+same purpose - efficient use of the IDE bus, but is interrupt
+driven and thus has much less CPU overhead.
+
+ATAPI overlap is likely to be supported in most new ATAPI
+devices, including new ATAPI cdroms, and thus provides us
+a method by which we can achieve higher throughput when
+sharing a (fast) ATA-2 disk with any (slow) new ATAPI device.
diff --git a/Documentation/ide/ide.txt b/Documentation/ide/ide.txt
index 486c699f4ae..0c78f4b1d9d 100644
--- a/Documentation/ide/ide.txt
+++ b/Documentation/ide/ide.txt
@@ -82,27 +82,26 @@ Drives are normally found by auto-probing and/or examining the CMOS/BIOS data.
For really weird situations, the apparent (fdisk) geometry can also be specified
on the kernel "command line" using LILO. The format of such lines is:
- hdx=cyls,heads,sects
-or hdx=cdrom
+ ide_core.chs=[interface_number.device_number]:cyls,heads,sects
+or ide_core.cdrom=[interface_number.device_number]
-where hdx can be any of hda through hdh, Three values are required
-(cyls,heads,sects). For example:
+For example:
- hdc=1050,32,64 hdd=cdrom
+ ide_core.chs=1.0:1050,32,64 ide_core.cdrom=1.1
-either {hda,hdb} or {hdc,hdd}. The results of successful auto-probing may
-override the physical geometry/irq specified, though the "original" geometry
-may be retained as the "logical" geometry for partitioning purposes (fdisk).
+The results of successful auto-probing may override the physical geometry/irq
+specified, though the "original" geometry may be retained as the "logical"
+geometry for partitioning purposes (fdisk).
If the auto-probing during boot time confuses a drive (ie. the drive works
with hd.c but not with ide.c), then an command line option may be specified
for each drive for which you'd like the drive to skip the hardware
probe/identification sequence. For example:
- hdb=noprobe
+ ide_core.noprobe=0.1
or
- hdc=768,16,32
- hdc=noprobe
+ ide_core.chs=1.0:768,16,32
+ ide_core.noprobe=1.0
Note that when only one IDE device is attached to an interface, it should be
jumpered as "single" or "master", *not* "slave". Many folks have had
@@ -118,9 +117,9 @@ If for some reason your cdrom drive is *not* found at boot time, you can force
the probe to look harder by supplying a kernel command line parameter
via LILO, such as:
- hdc=cdrom /* hdc = "master" on second interface */
+ ide_core.cdrom=1.0 /* "master" on second interface (hdc) */
or
- hdd=cdrom /* hdd = "slave" on second interface */
+ ide_core.cdrom=1.1 /* "slave" on second interface (hdd) */
For example, a GW2000 system might have a hard drive on the primary
interface (/dev/hda) and an IDE cdrom drive on the secondary interface
@@ -174,9 +173,7 @@ to /etc/modprobe.conf.
When ide.c is used as a module, you can pass command line parameters to the
driver using the "options=" keyword to insmod, while replacing any ',' with
-';'. For example:
-
- insmod ide.o options="hda=nodma hdb=nodma"
+';'.
================================================================================
@@ -184,57 +181,6 @@ driver using the "options=" keyword to insmod, while replacing any ',' with
Summary of ide driver parameters for kernel command line
--------------------------------------------------------
- "hdx=" is recognized for all "x" from "a" to "u", such as "hdc".
-
- "idex=" is recognized for all "x" from "0" to "9", such as "ide1".
-
- "hdx=noprobe" : drive may be present, but do not probe for it
-
- "hdx=none" : drive is NOT present, ignore cmos and do not probe
-
- "hdx=nowerr" : ignore the WRERR_STAT bit on this drive
-
- "hdx=cdrom" : drive is present, and is a cdrom drive
-
- "hdx=cyl,head,sect" : disk drive is present, with specified geometry
-
- "hdx=autotune" : driver will attempt to tune interface speed
- to the fastest PIO mode supported,
- if possible for this drive only.
- Not fully supported by all chipset types,
- and quite likely to cause trouble with
- older/odd IDE drives.
-
- "hdx=nodma" : disallow DMA
-
- "idebus=xx" : inform IDE driver of VESA/PCI bus speed in MHz,
- where "xx" is between 20 and 66 inclusive,
- used when tuning chipset PIO modes.
- For PCI bus, 25 is correct for a P75 system,
- 30 is correct for P90,P120,P180 systems,
- and 33 is used for P100,P133,P166 systems.
- If in doubt, use idebus=33 for PCI.
- As for VLB, it is safest to not specify it.
- Bigger values are safer than smaller ones.
-
- "idex=serialize" : do not overlap operations on idex. Please note
- that you will have to specify this option for
- both the respective primary and secondary channel
- to take effect.
-
- "idex=reset" : reset interface after probe
-
- "idex=ata66" : informs the interface that it has an 80c cable
- for chipsets that are ATA-66 capable, but the
- ability to bit test for detection is currently
- unknown.
-
- "ide=doubler" : probe/support IDE doublers on Amiga
-
-There may be more options than shown -- use the source, Luke!
-
-Everything else is rejected with a "BAD OPTION" message.
-
For legacy IDE VLB host drivers (ali14xx/dtc2278/ht6560b/qd65xx/umc8672)
you need to explicitly enable probing by using "probe" kernel parameter,
i.e. to enable probing for ALI M14xx chipsets (ali14xx host driver) use:
@@ -251,6 +197,33 @@ are detected automatically).
You also need to use "probe" kernel parameter for ide-4drives driver
(support for IDE generic chipset with four drives on one port).
+To enable support for IDE doublers on Amiga use the "doubler" kernel parameter
+for the gayle host driver (i.e. "gayle.doubler" if the driver is built-in).
+
+To force ignoring cable detection (this should be needed only if you're using
+a short 40-wire cable which cannot be automatically detected - if this is not
+the case, please report it as a bug instead) use the "ignore_cable" kernel parameter:
+
+* "ide_core.ignore_cable=[interface_number]" boot option if IDE is built-in
+ (i.e. "ide_core.ignore_cable=1" to force ignoring cable for "ide1")
+
+* "ignore_cable=[interface_number]" module parameter (for ide_core module)
+ if IDE is compiled as module
+
+Other kernel parameters for ide_core are:
+
+* "nodma=[interface_number.device_number]" to disallow DMA for a device
+
+* "noflush=[interface_number.device_number]" to disable flush requests
+
+* "noprobe=[interface_number.device_number]" to skip probing
+
+* "nowerr=[interface_number.device_number]" to ignore the WRERR_STAT bit
+
+* "cdrom=[interface_number.device_number]" to force device as a CD-ROM
+
+* "chs=[interface_number.device_number]" to force device as a disk (using CHS)
+
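+For example, to skip probing the slave device on the first interface and to
+force the master device on the second interface to be treated as a CD-ROM
+(an illustrative combination of the parameters above):
+
+	ide_core.noprobe=0.1 ide_core.cdrom=1.0
+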
================================================================================
Some Terminology
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index c18363bd8d1..240ce7a56c4 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -183,6 +183,8 @@ Code Seq# Include File Comments
0xAC 00-1F linux/raw.h
0xAD 00 Netfilter device in development:
<mailto:rusty@rustcorp.com.au>
+0xAE all linux/kvm.h Kernel-based Virtual Machine
+ <mailto:kvm-devel@lists.sourceforge.net>
0xB0 all RATIO devices in development:
<mailto:vgo@ratio.de>
0xB1 00-1F PPPoX <mailto:mostrows@styx.uwaterloo.ca>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index bf6303ec0bd..e5f3d918316 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -772,10 +772,6 @@ and is between 256 and 4096 characters. It is defined in the file
Format: ide=nodma or ide=doubler
See Documentation/ide/ide.txt.
- ide?= [HW] (E)IDE subsystem
- Format: ide?=ata66 or chipset specific parameters.
- See Documentation/ide/ide.txt.
-
idebus= [HW] (E)IDE subsystem - VLB/PCI bus speed
See Documentation/ide/ide.txt.
diff --git a/Documentation/mips/AU1xxx_IDE.README b/Documentation/mips/AU1xxx_IDE.README
index 5c8334123f4..25a6ed1aaa5 100644
--- a/Documentation/mips/AU1xxx_IDE.README
+++ b/Documentation/mips/AU1xxx_IDE.README
@@ -46,8 +46,6 @@ Two files are introduced:
a) 'include/asm-mips/mach-au1x00/au1xxx_ide.h'
containes : struct _auide_hwif
- struct drive_list_entry dma_white_list
- struct drive_list_entry dma_black_list
timing parameters for PIO mode 0/1/2/3/4
timing parameters for MWDMA 0/1/2
@@ -63,12 +61,6 @@ Four configs variables are introduced:
CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ - maximum transfer size
per descriptor
-If MWDMA is enabled and the connected hard disc is not on the white list, the
-kernel switches to a "safe mwdma mode" at boot time. In this mode the IDE
-performance is substantial slower then in full speed mwdma. In this case
-please add your hard disc to the white list (follow instruction from 'ADD NEW
-HARD DISC TO WHITE OR BLACK LIST' section).
-
SUPPORTED IDE MODES
-------------------
@@ -120,44 +112,6 @@ CONFIG_IDEDMA_AUTO=y
Also undefine 'IDE_AU1XXX_BURSTMODE' in 'drivers/ide/mips/au1xxx-ide.c' to
disable the burst support on DBDMA controller.
-ADD NEW HARD DISC TO WHITE OR BLACK LIST
-----------------------------------------
-
-Step 1 : detect the model name of your hard disc
-
- a) connect your hard disc to the AU1XXX
-
- b) boot your kernel and get the hard disc model.
-
- Example boot log:
-
- --snipped--
- Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
- ide: Assuming 50MHz system bus speed for PIO modes; override with idebus=xx
- Au1xxx IDE(builtin) configured for MWDMA2
- Probing IDE interface ide0...
- hda: Maxtor 6E040L0, ATA DISK drive
- ide0 at 0xac800000-0xac800007,0xac8001c0 on irq 64
- hda: max request size: 64KiB
- hda: 80293248 sectors (41110 MB) w/2048KiB Cache, CHS=65535/16/63, (U)DMA
- --snipped--
-
- In this example 'Maxtor 6E040L0'.
-
-Step 2 : edit 'include/asm-mips/mach-au1x00/au1xxx_ide.h'
-
- Add your hard disc to the dma_white_list or dma_black_list structur.
-
-Step 3 : Recompile the kernel
-
- Enable MWDMA support in the kernel configuration. Recompile the kernel and
- reboot.
-
-Step 4 : Tests
-
- If you have add a hard disc to the white list, please run some stress tests
- for verification.
-
ACKNOWLEDGMENTS
---------------
diff --git a/Documentation/powerpc/kvm_440.txt b/Documentation/powerpc/kvm_440.txt
new file mode 100644
index 00000000000..c02a003fa03
--- /dev/null
+++ b/Documentation/powerpc/kvm_440.txt
@@ -0,0 +1,41 @@
+Hollis Blanchard <hollisb@us.ibm.com>
+15 Apr 2008
+
+Various notes on the implementation of KVM for PowerPC 440:
+
+To enforce isolation, host userspace, guest kernel, and guest userspace all
+run at user privilege level. Only the host kernel runs in supervisor mode.
+Executing privileged instructions in the guest traps into KVM (in the host
+kernel), where we decode and emulate them. Through this technique, unmodified
+440 Linux kernels can be run (slowly) as guests. Future performance work will
+focus on reducing the overhead and frequency of these traps.
+
+The usual code flow starts with userspace invoking a "run" ioctl, which
+causes KVM to switch into guest context. We use IVPR to hijack the host
+interrupt vectors while running the guest, which allows us to direct all
+interrupts to kvmppc_handle_interrupt(). At this point, we could either
+- handle the interrupt completely (e.g. emulate "mtspr SPRG0"), or
+- let the host interrupt handler run (e.g. when the decrementer fires), or
+- return to host userspace (e.g. when the guest performs device MMIO)
+
+Address spaces: We take advantage of the fact that Linux doesn't use the AS=1
+address space (in host or guest), which gives us virtual address space to use
+for guest mappings. While the guest is running, the host kernel remains mapped
+in AS=0, but the guest can only use AS=1 mappings.
+
+TLB entries: The TLB entries covering the host linear mapping remain
+present while running the guest. This reduces the overhead of lightweight
+exits, which are handled by KVM running in the host kernel. We keep three
+copies of the TLB:
+ - guest TLB: contents of the TLB as the guest sees it
+ - shadow TLB: the TLB that is actually in hardware while guest is running
+ - host TLB: to restore TLB state when context switching guest -> host
+When a TLB miss occurs because a mapping was not present in the shadow TLB,
+but was present in the guest TLB, KVM handles the fault without invoking the
+guest. Large guest pages are backed by multiple 4KB shadow pages through this
+mechanism.
+
+IO: MMIO and DCR accesses are emulated by userspace. We use virtio for network
+and block IO, so those drivers must be enabled in the guest. It's possible
+that some qemu device emulation (e.g. e1000 or rtl8139) may also work with
+little effort.
diff --git a/Documentation/s390/kvm.txt b/Documentation/s390/kvm.txt
new file mode 100644
index 00000000000..6f5ceb0f09f
--- /dev/null
+++ b/Documentation/s390/kvm.txt
@@ -0,0 +1,125 @@
+*** BIG FAT WARNING ***
+The kvm module is currently in EXPERIMENTAL state for s390. This means that
+the interface to the module is not yet considered to remain stable. Thus, be
+prepared that we keep breaking your userspace application and guest
+compatibility over and over again until we feel happy with the result. Make sure
+your guest kernel, your host kernel, and your userspace launcher are in a
+consistent state.
+
+This document describes the ioctl calls to /dev/kvm, to the resulting
+kvm-vm file descriptors, and to the kvm-vcpu file descriptors that differ from x86.
+
+1. ioctl calls to /dev/kvm
+KVM supports the following ioctls on s390 that are common with other
+architectures and behave the same:
+KVM_GET_API_VERSION
+KVM_CREATE_VM (*) see note
+KVM_CHECK_EXTENSION
+KVM_GET_VCPU_MMAP_SIZE
+
+Notes:
+* KVM_CREATE_VM may fail on s390, if the calling process has multiple
+threads and has not called KVM_S390_ENABLE_SIE before.
+
+In addition, on s390 the following architecture specific ioctls are supported:
+ioctl: KVM_S390_ENABLE_SIE
+args: none
+see also: include/linux/kvm.h
+This call causes the kernel to switch on PGSTE in the user page table. This
+operation is needed in order to run a virtual machine, and it requires the
+calling process to be single-threaded. Note that the first call to KVM_CREATE_VM
+will implicitly try to switch on PGSTE if the user process has not called
+KVM_S390_ENABLE_SIE before. User processes that want to launch multiple threads
+before creating a virtual machine have to call KVM_S390_ENABLE_SIE, or will
+observe an error calling KVM_CREATE_VM. Switching on PGSTE is a one-time
+operation, is not reversible, and will persist over the entire lifetime of
+the calling process. It does not have any user-visible effect other than a small
+performance penalty.
+
+2. ioctl calls to the kvm-vm file descriptor
+KVM supports the following ioctls on s390 that are common with other
+architectures and behave the same:
+KVM_CREATE_VCPU
+KVM_SET_USER_MEMORY_REGION (*) see note
+KVM_GET_DIRTY_LOG (**) see note
+
+Notes:
+* kvm allows exactly one memory slot on s390, which has to start
+  at guest absolute address zero and at a user address that is aligned on any
+  page boundary. This hardware "limitation" allows us to have a few unique
+  optimizations. The memory slot doesn't actually have to be filled
+  with memory; it may contain sparse holes. That said, with a different
+  user memory layout this still allows great flexibility when
+  doing the guest memory setup.
+** KVM_GET_DIRTY_LOG doesn't work properly yet. The user will receive an empty
+log. This ioctl call is only needed for guest migration, and we intend to
+implement this one in the future.
+
+In addition, on s390 the following architecture specific ioctls for the kvm-vm
+file descriptor are supported:
+ioctl: KVM_S390_INTERRUPT
+args: struct kvm_s390_interrupt *
+see also: include/linux/kvm.h
+This ioctl is used to submit a floating interrupt for a virtual machine.
+Floating interrupts may be delivered to any virtual cpu in the configuration.
+Only some interrupt types defined in include/linux/kvm.h make sense when
+submitted as floating interrupts. The following interrupts are not considered
+to be useful as floating interrupts, and a call to inject them will result in
+-EINVAL error code: program interrupts and interprocessor signals. Valid
+floating interrupts are:
+KVM_S390_INT_VIRTIO
+KVM_S390_INT_SERVICE
+
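+As an illustration, a userspace launcher might submit a floating virtio
+interrupt roughly as follows (a sketch only: vm_fd and the parameter values
+are assumptions; the struct is the one declared in include/linux/kvm.h):
+
+	struct kvm_s390_interrupt irq = {
+		.type   = KVM_S390_INT_VIRTIO,	/* floating interrupt type */
+		.parm   = 0,			/* interrupt parameter */
+		.parm64 = 0,			/* 64-bit interrupt parameter */
+	};
+
+	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &irq) < 0)	/* kvm-vm fd */
+		perror("KVM_S390_INTERRUPT");
+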
+3. ioctl calls to the kvm-vcpu file descriptor
+KVM supports the following ioctls on s390 that are common with other
+architectures and behave the same:
+KVM_RUN
+KVM_GET_REGS
+KVM_SET_REGS
+KVM_GET_SREGS
+KVM_SET_SREGS
+KVM_GET_FPU
+KVM_SET_FPU
+
+In addition, on s390 the following architecture specific ioctls for the
+kvm-vcpu file descriptor are supported:
+ioctl: KVM_S390_INTERRUPT
+args: struct kvm_s390_interrupt *
+see also: include/linux/kvm.h
+This ioctl is used to submit an interrupt for a specific virtual cpu.
+Only some interrupt types defined in include/linux/kvm.h make sense when
+submitted for a specific cpu. The following interrupts are not considered
+to be useful, and a call to inject them will result in -EINVAL error code:
+service processor calls and virtio interrupts. Valid interrupt types are:
+KVM_S390_PROGRAM_INT
+KVM_S390_SIGP_STOP
+KVM_S390_RESTART
+KVM_S390_SIGP_SET_PREFIX
+KVM_S390_INT_EMERGENCY
+
+ioctl: KVM_S390_STORE_STATUS
+args: unsigned long
+see also: include/linux/kvm.h
+This ioctl stores the state of the cpu at the guest real address given as
+argument, unless one of the following values defined in include/linux/kvm.h
+is given as argument:
+KVM_S390_STORE_STATUS_NOADDR - the CPU stores its status to the save area in
+absolute lowcore as defined by the principles of operation
+KVM_S390_STORE_STATUS_PREFIXED - the CPU stores its status to the save area in
+its prefix page just like the dump tool that comes with zipl. This is useful
+to create a system dump for use with lkcdutils or crash.
+
+ioctl: KVM_S390_SET_INITIAL_PSW
+args: struct kvm_s390_psw *
+see also: include/linux/kvm.h
+This ioctl can be used to set the processor status word (psw) of a stopped cpu
+prior to running it with KVM_RUN. Note that this call is not required to modify
+the psw during sie intercepts that fall back to userspace because struct kvm_run
+does contain the psw, and this value is evaluated during reentry of KVM_RUN
+after the intercept exit was recognized.
+
+ioctl: KVM_S390_INITIAL_RESET
+args: none
+see also: include/linux/kvm.h
+This ioctl can be used to perform an initial cpu reset as defined by the
+principles of operation. The target cpu has to be in stopped state.
diff --git a/Documentation/smart-config.txt b/Documentation/smart-config.txt
deleted file mode 100644
index 8467447b5a8..00000000000
--- a/Documentation/smart-config.txt
+++ /dev/null
@@ -1,98 +0,0 @@
-Smart CONFIG_* Dependencies
-1 August 1999
-
-Michael Chastain <mec@shout.net>
-Werner Almesberger <almesber@lrc.di.epfl.ch>
-Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>
-
-Here is the problem:
-
- Suppose that drivers/net/foo.c has the following lines:
-
- #include <linux/config.h>
-
- ...
-
- #ifdef CONFIG_FOO_AUTOFROB
- /* Code for auto-frobbing */
- #else
- /* Manual frobbing only */
- #endif
-
- ...
-
- #ifdef CONFIG_FOO_MODEL_TWO
- /* Code for model two */
- #endif
-
- Now suppose the user (the person building kernels) reconfigures the
- kernel to change some unrelated setting. This will regenerate the
- file include/linux/autoconf.h, which will cause include/linux/config.h
- to be out of date, which will cause drivers/net/foo.c to be recompiled.
-
- Most kernel sources, perhaps 80% of them, have at least one CONFIG_*
- dependency somewhere. So changing _any_ CONFIG_* setting requires
- almost _all_ of the kernel to be recompiled.
-
-Here is the solution:
-
- We've made the dependency generator, mkdep.c, smarter. Instead of
- generating this dependency:
-
- drivers/net/foo.c: include/linux/config.h
-
- It now generates these dependencies:
-
- drivers/net/foo.c: \
- include/config/foo/autofrob.h \
- include/config/foo/model/two.h
-
- So drivers/net/foo.c depends only on the CONFIG_* lines that
- it actually uses.
-
- A new program, split-include.c, runs at the beginning of
- compilation (make bzImage or make zImage). split-include reads
- include/linux/autoconf.h and updates the include/config/ tree,
- writing one file per option. It updates only the files for options
- that have changed.
-
-Flag Dependencies
-
- Martin Von Loewis contributed another feature to this patch:
- 'flag dependencies'. The idea is that a .o file depends on
- the compilation flags used to build it. The file foo.o has
- its flags stored in .flags.foo.o.
-
- Suppose the user changes the foo driver from resident to modular.
- 'make' will notice that the current foo.o was not compiled with
- -DMODULE and will recompile foo.c.
-
- All .o files made from C source have flag dependencies. So do .o
- files made with ld, and .a files made with ar. However, .o files
- made from assembly source do not have flag dependencies (nobody
- needs this yet, but it would be good to fix).
-
-Per-source-file Flags
-
- Flag dependencies also work with per-source-file flags.
- You can specify compilation flags for individual source files
- like this:
-
- CFLAGS_foo.o = -DSPECIAL_FOO_DEFINE
-
- This helps clean up drivers/net/Makefile, drivers/scsi/Makefile,
- and several other Makefiles.
-
-Credit
-
- Werner Almesberger had the original idea and wrote the first
- version of this patch.
-
- Michael Chastain picked it up and continued development. He is
- now the principal author and maintainer. Please report any bugs
- to him.
-
- Martin von Loewis wrote flag dependencies, with some modifications
- by Michael Chastain.
-
- Thanks to all of the beta testers.
diff --git a/MAINTAINERS b/MAINTAINERS
index a942f385249..c1dd1ae7b13 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2329,6 +2329,13 @@ L: kvm-devel@lists.sourceforge.net
W: kvm.sourceforge.net
S: Supported
+KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
+P: Hollis Blanchard
+M: hollisb@us.ibm.com
+L: kvm-ppc-devel@lists.sourceforge.net
+W: kvm.sourceforge.net
+S: Supported
+
KERNEL VIRTUAL MACHINE For Itanium(KVM/IA64)
P: Anthony Xu
M: anthony.xu@intel.com
@@ -2338,6 +2345,16 @@ L: kvm-ia64-devel@lists.sourceforge.net
W: kvm.sourceforge.net
S: Supported
+KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
+P: Carsten Otte
+M: cotte@de.ibm.com
+P: Christian Borntraeger
+M: borntraeger@de.ibm.com
+M: linux390@de.ibm.com
+L: linux-s390@vger.kernel.org
+W: http://www.ibm.com/developerworks/linux/linux390/
+S: Supported
+
KEXEC
P: Eric Biederman
M: ebiederm@xmission.com
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index cd13e138bd0..3aa6c821449 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -19,6 +19,7 @@ config IA64
select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_KRETPROBES
+ select HAVE_KVM
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -589,6 +590,8 @@ config MSPEC
source "fs/Kconfig"
+source "arch/ia64/kvm/Kconfig"
+
source "lib/Kconfig"
#
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index f1645c4f703..ec4cca477f4 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -57,6 +57,7 @@ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
+core-$(CONFIG_KVM) += arch/ia64/kvm/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
new file mode 100644
index 00000000000..7914e482850
--- /dev/null
+++ b/arch/ia64/kvm/Kconfig
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+config HAVE_KVM
+ bool
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ depends on HAVE_KVM || IA64
+ default y
+ ---help---
+ Say Y here to get to see options for using your Linux host to run other
+ operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ tristate "Kernel-based Virtual Machine (KVM) support"
+ depends on HAVE_KVM && EXPERIMENTAL
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ ---help---
+ Support hosting fully virtualized guest machines using hardware
+ virtualization extensions. You will need a fairly recent
+ processor equipped with virtualization extensions. You will also
+ need to select one or more of the processor modules below.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ To compile this as a module, choose M here: the module
+ will be called kvm.
+
+ If unsure, say N.
+
+config KVM_INTEL
+ tristate "KVM for Intel Itanium 2 processors support"
+ depends on KVM && m
+ ---help---
+ Provides support for KVM on Itanium 2 processors equipped with the VT
+ extensions.
+
+config KVM_TRACE
+ bool
+
+endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
new file mode 100644
index 00000000000..41b034ffa73
--- /dev/null
+++ b/arch/ia64/kvm/Makefile
@@ -0,0 +1,61 @@
+# This Makefile generates asm-offsets.h and builds the KVM sources.
+#
+
+# Generate asm-offsets.h for the VMM module build
+offsets-file := asm-offsets.h
+
+always := $(offsets-file)
+targets := $(offsets-file)
+targets += arch/ia64/kvm/asm-offsets.s
+clean-files := $(addprefix $(objtree)/,$(targets) $(obj)/memcpy.S $(obj)/memset.S)
+
+# Default sed regexp - multiline due to syntax constraints
+define sed-y
+ "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
+endef
+
+quiet_cmd_offsets = GEN $@
+define cmd_offsets
+ (set -e; \
+ echo "#ifndef __ASM_KVM_OFFSETS_H__"; \
+ echo "#define __ASM_KVM_OFFSETS_H__"; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY."; \
+ echo " *"; \
+	 echo " * This file was generated by the Makefile."; \
+ echo " *"; \
+ echo " */"; \
+ echo ""; \
+ sed -ne $(sed-y) $<; \
+ echo ""; \
+ echo "#endif" ) > $@
+endef
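+
+# Illustrative example (hypothetical numbers): a marker line in
+# asm-offsets.s such as
+#   ->VMM_TASK_SIZE $24576 sizeof(struct kvm_vcpu)
+# is rewritten by sed-y into
+#   #define VMM_TASK_SIZE 24576 /* sizeof(struct kvm_vcpu) */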
+# We use internal rules to avoid the "is up to date" message from make
+arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c
+ $(call if_changed_dep,cc_s_c)
+
+$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
+ $(call cmd,offsets)
+
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
+
+$(addprefix $(objtree)/,$(obj)/memcpy.S $(obj)/memset.S):
+ $(shell ln -snf ../lib/memcpy.S $(src)/memcpy.S)
+ $(shell ln -snf ../lib/memset.S $(src)/memset.S)
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+
+kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
+obj-$(CONFIG_KVM) += kvm.o
+
+FORCE : $(obj)/$(offsets-file)
+EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
+kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
+ vtlb.o process.o
+# Link in memcpy and memset so that compiler-emitted calls for
+# structure assignment resolve inside the module
+kvm-intel-objs += memset.o memcpy.o
+obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
new file mode 100644
index 00000000000..4e3dc13a619
--- /dev/null
+++ b/arch/ia64/kvm/asm-offsets.c
@@ -0,0 +1,251 @@
+/*
+ * asm-offsets.c Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ *
+ * Anthony Xu <anthony.xu@intel.com>
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ * Copyright (c) 2007 Intel Corporation KVM support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/autoconf.h>
+#include <linux/kvm_host.h>
+
+#include "vcpu.h"
+
+#define task_struct kvm_vcpu
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : :)
+
+#define OFFSET(_sym, _str, _mem) \
+ DEFINE(_sym, offsetof(_str, _mem));
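+
+/*
+ * DEFINE() relies on the "i" (immediate) constraint: the compiler
+ * evaluates the constant expression and prints its value into the
+ * assembly output as a "->SYM (value) expr" marker line, which the
+ * Makefile's sed script later turns into a #define in asm-offsets.h.
+ */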
+
+void foo(void)
+{
+ DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
+ DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs));
+
+ BLANK();
+
+ DEFINE(VMM_VCPU_META_RR0_OFFSET,
+ offsetof(struct kvm_vcpu, arch.metaphysical_rr0));
+ DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
+ offsetof(struct kvm_vcpu,
+ arch.metaphysical_saved_rr0));
+ DEFINE(VMM_VCPU_VRR0_OFFSET,
+ offsetof(struct kvm_vcpu, arch.vrr[0]));
+ DEFINE(VMM_VPD_IRR0_OFFSET,
+ offsetof(struct vpd, irr[0]));
+ DEFINE(VMM_VCPU_ITC_CHECK_OFFSET,
+ offsetof(struct kvm_vcpu, arch.itc_check));
+ DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET,
+ offsetof(struct kvm_vcpu, arch.irq_check));
+ DEFINE(VMM_VPD_VHPI_OFFSET,
+ offsetof(struct vpd, vhpi));
+ DEFINE(VMM_VCPU_VSA_BASE_OFFSET,
+ offsetof(struct kvm_vcpu, arch.vsa_base));
+ DEFINE(VMM_VCPU_VPD_OFFSET,
+ offsetof(struct kvm_vcpu, arch.vpd));
+ DEFINE(VMM_VCPU_IRQ_CHECK,
+ offsetof(struct kvm_vcpu, arch.irq_check));
+ DEFINE(VMM_VCPU_TIMER_PENDING,
+ offsetof(struct kvm_vcpu, arch.timer_pending));
+ DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
+ offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0));
+ DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
+ offsetof(struct kvm_vcpu, arch.mode_flags));
+ DEFINE(VMM_VCPU_ITC_OFS_OFFSET,
+ offsetof(struct kvm_vcpu, arch.itc_offset));
+ DEFINE(VMM_VCPU_LAST_ITC_OFFSET,
+ offsetof(struct kvm_vcpu, arch.last_itc));
+ DEFINE(VMM_VCPU_SAVED_GP_OFFSET,
+ offsetof(struct kvm_vcpu, arch.saved_gp));
+
+ BLANK();
+
+ DEFINE(VMM_PT_REGS_B6_OFFSET,
+ offsetof(struct kvm_pt_regs, b6));
+ DEFINE(VMM_PT_REGS_B7_OFFSET,
+ offsetof(struct kvm_pt_regs, b7));
+ DEFINE(VMM_PT_REGS_AR_CSD_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_csd));
+ DEFINE(VMM_PT_REGS_AR_SSD_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_ssd));
+ DEFINE(VMM_PT_REGS_R8_OFFSET,
+ offsetof(struct kvm_pt_regs, r8));
+ DEFINE(VMM_PT_REGS_R9_OFFSET,
+ offsetof(struct kvm_pt_regs, r9));
+ DEFINE(VMM_PT_REGS_R10_OFFSET,
+ offsetof(struct kvm_pt_regs, r10));
+ DEFINE(VMM_PT_REGS_R11_OFFSET,
+ offsetof(struct kvm_pt_regs, r11));
+ DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET,
+ offsetof(struct kvm_pt_regs, cr_ipsr));
+ DEFINE(VMM_PT_REGS_CR_IIP_OFFSET,
+ offsetof(struct kvm_pt_regs, cr_iip));
+ DEFINE(VMM_PT_REGS_CR_IFS_OFFSET,
+ offsetof(struct kvm_pt_regs, cr_ifs));
+ DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_unat));
+ DEFINE(VMM_PT_REGS_AR_PFS_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_pfs));
+ DEFINE(VMM_PT_REGS_AR_RSC_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_rsc));
+ DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_rnat));
+
+ DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_bspstore));
+ DEFINE(VMM_PT_REGS_PR_OFFSET,
+ offsetof(struct kvm_pt_regs, pr));
+ DEFINE(VMM_PT_REGS_B0_OFFSET,
+ offsetof(struct kvm_pt_regs, b0));
+ DEFINE(VMM_PT_REGS_LOADRS_OFFSET,
+ offsetof(struct kvm_pt_regs, loadrs));
+ DEFINE(VMM_PT_REGS_R1_OFFSET,
+ offsetof(struct kvm_pt_regs, r1));
+ DEFINE(VMM_PT_REGS_R12_OFFSET,
+ offsetof(struct kvm_pt_regs, r12));
+ DEFINE(VMM_PT_REGS_R13_OFFSET,
+ offsetof(struct kvm_pt_regs, r13));
+ DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_fpsr));
+ DEFINE(VMM_PT_REGS_R15_OFFSET,
+ offsetof(struct kvm_pt_regs, r15));
+ DEFINE(VMM_PT_REGS_R14_OFFSET,
+ offsetof(struct kvm_pt_regs, r14));
+ DEFINE(VMM_PT_REGS_R2_OFFSET,
+ offsetof(struct kvm_pt_regs, r2));
+ DEFINE(VMM_PT_REGS_R3_OFFSET,
+ offsetof(struct kvm_pt_regs, r3));
+ DEFINE(VMM_PT_REGS_R16_OFFSET,
+ offsetof(struct kvm_pt_regs, r16));
+ DEFINE(VMM_PT_REGS_R17_OFFSET,
+ offsetof(struct kvm_pt_regs, r17));
+ DEFINE(VMM_PT_REGS_R18_OFFSET,
+ offsetof(struct kvm_pt_regs, r18));
+ DEFINE(VMM_PT_REGS_R19_OFFSET,
+ offsetof(struct kvm_pt_regs, r19));
+ DEFINE(VMM_PT_REGS_R20_OFFSET,
+ offsetof(struct kvm_pt_regs, r20));
+ DEFINE(VMM_PT_REGS_R21_OFFSET,
+ offsetof(struct kvm_pt_regs, r21));
+ DEFINE(VMM_PT_REGS_R22_OFFSET,
+ offsetof(struct kvm_pt_regs, r22));
+ DEFINE(VMM_PT_REGS_R23_OFFSET,
+ offsetof(struct kvm_pt_regs, r23));
+ DEFINE(VMM_PT_REGS_R24_OFFSET,
+ offsetof(struct kvm_pt_regs, r24));
+ DEFINE(VMM_PT_REGS_R25_OFFSET,
+ offsetof(struct kvm_pt_regs, r25));
+ DEFINE(VMM_PT_REGS_R26_OFFSET,
+ offsetof(struct kvm_pt_regs, r26));
+ DEFINE(VMM_PT_REGS_R27_OFFSET,
+ offsetof(struct kvm_pt_regs, r27));
+ DEFINE(VMM_PT_REGS_R28_OFFSET,
+ offsetof(struct kvm_pt_regs, r28));
+ DEFINE(VMM_PT_REGS_R29_OFFSET,
+ offsetof(struct kvm_pt_regs, r29));
+ DEFINE(VMM_PT_REGS_R30_OFFSET,
+ offsetof(struct kvm_pt_regs, r30));
+ DEFINE(VMM_PT_REGS_R31_OFFSET,
+ offsetof(struct kvm_pt_regs, r31));
+ DEFINE(VMM_PT_REGS_AR_CCV_OFFSET,
+ offsetof(struct kvm_pt_regs, ar_ccv));
+ DEFINE(VMM_PT_REGS_F6_OFFSET,
+ offsetof(struct kvm_pt_regs, f6));
+ DEFINE(VMM_PT_REGS_F7_OFFSET,
+ offsetof(struct kvm_pt_regs, f7));
+ DEFINE(VMM_PT_REGS_F8_OFFSET,
+ offsetof(struct kvm_pt_regs, f8));
+ DEFINE(VMM_PT_REGS_F9_OFFSET,
+ offsetof(struct kvm_pt_regs, f9));
+ DEFINE(VMM_PT_REGS_F10_OFFSET,
+ offsetof(struct kvm_pt_regs, f10));
+ DEFINE(VMM_PT_REGS_F11_OFFSET,
+ offsetof(struct kvm_pt_regs, f11));
+ DEFINE(VMM_PT_REGS_R4_OFFSET,
+ offsetof(struct kvm_pt_regs, r4));
+ DEFINE(VMM_PT_REGS_R5_OFFSET,
+ offsetof(struct kvm_pt_regs, r5));
+ DEFINE(VMM_PT_REGS_R6_OFFSET,
+ offsetof(struct kvm_pt_regs, r6));
+ DEFINE(VMM_PT_REGS_R7_OFFSET,
+ offsetof(struct kvm_pt_regs, r7));
+ DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET,
+ offsetof(struct kvm_pt_regs, eml_unat));
+ DEFINE(VMM_VCPU_IIPA_OFFSET,
+ offsetof(struct kvm_vcpu, arch.cr_iipa));
+ DEFINE(VMM_VCPU_OPCODE_OFFSET,
+ offsetof(struct kvm_vcpu, arch.opcode));
+ DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause));
+ DEFINE(VMM_VCPU_ISR_OFFSET,
+ offsetof(struct kvm_vcpu, arch.cr_isr));
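+	/*
+	 * 6-bit RSE slot index of r16 within the register backing store
+	 * frame; assumed to feed the NaT-collection math in the assembly.
+	 */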
+ DEFINE(VMM_PT_REGS_R16_SLOT,
+ (((offsetof(struct kvm_pt_regs, r16)
+ - sizeof(struct kvm_pt_regs)) >> 3) & 0x3f));
+ DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
+ offsetof(struct kvm_vcpu, arch.mode_flags));
+ DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp));
+ BLANK();
+
+ DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd));
+ DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs));
+ DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET,
+ offsetof(struct kvm_vcpu, arch.insvc[0]));
+ DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta));
+ DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr));
+
+ DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4]));
+ DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5]));
+ DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12]));
+ DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13]));
+ DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0]));
+ DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1]));
+ DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0]));
+ DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1]));
+ DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2]));
+ DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0]));
+ DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16]));
+ DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18]));
+ DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19]));
+ DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21]));
+ DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24]));
+ DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27]));
+ DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28]));
+ DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29]));
+ DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30]));
+ DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36]));
+ DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40]));
+ DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64]));
+ DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65]));
+ DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0]));
+ DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2]));
+ DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8]));
+ DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0]));
+ DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0]));
+ DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2]));
+ DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3]));
+ DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32]));
+ DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33]));
+ DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0]));
+ DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr));
+ BLANK();
+}
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
new file mode 100644
index 00000000000..6df07324013
--- /dev/null
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -0,0 +1,1806 @@
+
+/*
+ * kvm-ia64.c: basic KVM support on Itanium-series processors
+ *
+ *
+ * Copyright (C) 2007, Intel Corporation.
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/gfp.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <linux/bitops.h>
+#include <linux/hrtimer.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/gcc_intrin.h>
+#include <asm/pal.h>
+#include <asm/cacheflush.h>
+#include <asm/div64.h>
+#include <asm/tlb.h>
+
+#include "misc.h"
+#include "vti.h"
+#include "iodev.h"
+#include "ioapic.h"
+#include "lapic.h"
+
+static unsigned long kvm_vmm_base;
+static unsigned long kvm_vsa_base;
+static unsigned long kvm_vm_buffer;
+static unsigned long kvm_vm_buffer_size;
+unsigned long kvm_vmm_gp;
+
+static long vp_env_info;
+
+static struct kvm_vmm_info *kvm_vmm_info;
+
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { NULL }
+};
+
+
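+/* ia64 function descriptor: entry point address plus its global pointer. */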
+struct fdesc{
+ unsigned long ip;
+ unsigned long gp;
+};
+
+static void kvm_flush_icache(unsigned long start, unsigned long len)
+{
+ int l;
+
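+	/*
+	 * Flush one minimal (32-byte) cache line at a time, then
+	 * serialize so the new instructions are visible to fetch.
+	 */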
+ for (l = 0; l < (len + 32); l += 32)
+ ia64_fc(start + l);
+
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+
+static void kvm_flush_tlb_all(void)
+{
+ unsigned long i, j, count0, count1, stride0, stride1, addr;
+ long flags;
+
+ addr = local_cpu_data->ptce_base;
+ count0 = local_cpu_data->ptce_count[0];
+ count1 = local_cpu_data->ptce_count[1];
+ stride0 = local_cpu_data->ptce_stride[0];
+ stride1 = local_cpu_data->ptce_stride[1];
+
+ local_irq_save(flags);
+ for (i = 0; i < count0; ++i) {
+ for (j = 0; j < count1; ++j) {
+ ia64_ptce(addr);
+ addr += stride1;
+ }
+ addr += stride0;
+ }
+ local_irq_restore(flags);
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+}
+
+long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
+ (u64)opt_handler);
+
+ return iprv.status;
+}
+
+static DEFINE_SPINLOCK(vp_lock);
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+ long status;
+ long tmp_base;
+ unsigned long pte;
+ unsigned long saved_psr;
+ int slot;
+
+ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
+ PAGE_KERNEL));
+	local_irq_save(saved_psr);
+	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+	local_irq_restore(saved_psr);
+	if (slot < 0)
+		return;
+
+ spin_lock(&vp_lock);
+ status = ia64_pal_vp_init_env(kvm_vsa_base ?
+ VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
+ __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
+	if (status != 0) {
+		spin_unlock(&vp_lock);
+		printk(KERN_WARNING"kvm: Failed to enable VT support!\n");
+		return;
+	}
+
+ if (!kvm_vsa_base) {
+ kvm_vsa_base = tmp_base;
+ printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
+ }
+ spin_unlock(&vp_lock);
+ ia64_ptr_entry(0x3, slot);
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+
+ long status;
+ int slot;
+ unsigned long pte;
+ unsigned long saved_psr;
+ unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
+
+ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
+ PAGE_KERNEL));
+
+	local_irq_save(saved_psr);
+	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+	local_irq_restore(saved_psr);
+	if (slot < 0)
+		return;
+
+ status = ia64_pal_vp_exit_env(host_iva);
+ if (status)
+ printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
+ status);
+ ia64_ptr_entry(0x3, slot);
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ *(int *)rtn = 0;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_IRQCHIP:
+ case KVM_CAP_USER_MEMORY:
+
+ r = 1;
+ break;
+ default:
+ r = 0;
+ }
+ return r;
+
+}
+
+static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+ gpa_t addr)
+{
+ struct kvm_io_device *dev;
+
+ dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+
+ return dev;
+}
+
+static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_run->hw.hardware_exit_reason = 1;
+ return 0;
+}
+
+static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct kvm_mmio_req *p;
+ struct kvm_io_device *mmio_dev;
+
+ p = kvm_get_vcpu_ioreq(vcpu);
+
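+	/*
+	 * Accesses to the in-kernel IOAPIC page are emulated below the
+	 * "mmio" label; everything else exits to userspace as
+	 * KVM_EXIT_MMIO.
+	 */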
+ if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
+ goto mmio;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
+ vcpu->mmio_size = kvm_run->mmio.len = p->size;
+ vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
+
+ if (vcpu->mmio_is_write)
+ memcpy(vcpu->mmio_data, &p->data, p->size);
+ memcpy(kvm_run->mmio.data, &p->data, p->size);
+ kvm_run->exit_reason = KVM_EXIT_MMIO;
+ return 0;
+mmio:
+ mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr);
+ if (mmio_dev) {
+ if (!p->dir)
+ kvm_iodevice_write(mmio_dev, p->addr, p->size,
+ &p->data);
+ else
+ kvm_iodevice_read(mmio_dev, p->addr, p->size,
+ &p->data);
+
+ } else
+ printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
+ p->state = STATE_IORESP_READY;
+
+ return 1;
+}
+
+static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+
+ if (p->exit_reason == EXIT_REASON_PAL_CALL)
+ return kvm_pal_emul(vcpu, kvm_run);
+ else {
+ kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_run->hw.hardware_exit_reason = 2;
+ return 0;
+ }
+}
+
+static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+
+ if (p->exit_reason == EXIT_REASON_SAL_CALL) {
+ kvm_sal_emul(vcpu);
+ return 1;
+ } else {
+ kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_run->hw.hardware_exit_reason = 3;
+ return 0;
+ }
+
+}
+
+/*
+ * dm: delivery mode of the pending IPI.
+ * vector: vector number to deliver.
+ */
+static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
+ uint64_t vector)
+{
+ switch (dm) {
+ case SAPIC_FIXED:
+ kvm_apic_set_irq(vcpu, vector, 0);
+ break;
+ case SAPIC_NMI:
+ kvm_apic_set_irq(vcpu, 2, 0);
+ break;
+ case SAPIC_EXTINT:
+ kvm_apic_set_irq(vcpu, 0, 0);
+ break;
+ case SAPIC_INIT:
+ case SAPIC_PMI:
+ default:
+ printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
+ break;
+ }
+}
+
+static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
+ unsigned long eid)
+{
+ union ia64_lid lid;
+ int i;
+
+ for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ if (kvm->vcpus[i]) {
+ lid.val = VCPU_LID(kvm->vcpus[i]);
+ if (lid.id == id && lid.eid == eid)
+ return kvm->vcpus[i];
+ }
+ }
+
+ return NULL;
+}
+
+static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
+ struct kvm_vcpu *target_vcpu;
+ struct kvm_pt_regs *regs;
+ union ia64_ipi_a addr = p->u.ipi_data.addr;
+ union ia64_ipi_d data = p->u.ipi_data.data;
+
+ target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
+ if (!target_vcpu)
+ return handle_vm_error(vcpu, kvm_run);
+
+ if (!target_vcpu->arch.launched) {
+ regs = vcpu_regs(target_vcpu);
+
+ regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
+ regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
+
+ target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ if (waitqueue_active(&target_vcpu->wq))
+ wake_up_interruptible(&target_vcpu->wq);
+ } else {
+ vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
+ if (target_vcpu != vcpu)
+ kvm_vcpu_kick(target_vcpu);
+ }
+
+ return 1;
+}
+
+struct call_data {
+ struct kvm_ptc_g ptc_g_data;
+ struct kvm_vcpu *vcpu;
+};
+
+static void vcpu_global_purge(void *info)
+{
+ struct call_data *p = (struct call_data *)info;
+ struct kvm_vcpu *vcpu = p->vcpu;
+
+ if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+ return;
+
+ set_bit(KVM_REQ_PTC_G, &vcpu->requests);
+ if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
+ vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
+ p->ptc_g_data;
+ } else {
+ clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
+ vcpu->arch.ptc_g_count = 0;
+ set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
+ }
+}
+
+static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
+ struct kvm *kvm = vcpu->kvm;
+ struct call_data call_data;
+ int i;
+ call_data.ptc_g_data = p->u.ptc_g_data;
+
+ for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
+ KVM_MP_STATE_UNINITIALIZED ||
+ vcpu == kvm->vcpus[i])
+ continue;
+
+ if (waitqueue_active(&kvm->vcpus[i]->wq))
+ wake_up_interruptible(&kvm->vcpus[i]->wq);
+
+ if (kvm->vcpus[i]->cpu != -1) {
+ call_data.vcpu = kvm->vcpus[i];
+ smp_call_function_single(kvm->vcpus[i]->cpu,
+ vcpu_global_purge, &call_data, 0, 1);
+ } else
+ printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
+
+ }
+ return 1;
+}
+
+static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ return 1;
+}
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+
+ ktime_t kt;
+ long itc_diff;
+ unsigned long vcpu_now_itc;
+
+ unsigned long expires;
+ struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
+ unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+ vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+
+ if (time_after(vcpu_now_itc, vpd->itm)) {
+ vcpu->arch.timer_check = 1;
+ return 1;
+ }
+ itc_diff = vpd->itm - vcpu_now_itc;
+ if (itc_diff < 0)
+ itc_diff = -itc_diff;
+
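+	/* Convert the remaining ITC cycles to microseconds, then to ns. */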
+ expires = div64_64(itc_diff, cyc_per_usec);
+ kt = ktime_set(0, 1000 * expires);
+ vcpu->arch.ht_active = 1;
+ hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+
+ if (irqchip_in_kernel(vcpu->kvm)) {
+ vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+ kvm_vcpu_block(vcpu);
+ hrtimer_cancel(p_ht);
+ vcpu->arch.ht_active = 0;
+
+ if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
+ return -EINTR;
+ return 1;
+ } else {
+ printk(KERN_ERR"kvm: Unsupported userspace halt!");
+ return 0;
+ }
+}
+
+static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+ return 0;
+}
+
+static int handle_external_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ return 1;
+}
+
+static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run) = {
+ [EXIT_REASON_VM_PANIC] = handle_vm_error,
+ [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
+ [EXIT_REASON_PAL_CALL] = handle_pal_call,
+ [EXIT_REASON_SAL_CALL] = handle_sal_call,
+ [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
+ [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
+ [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
+ [EXIT_REASON_IPI] = handle_ipi,
+ [EXIT_REASON_PTC_G] = handle_global_purge,
+
+};
+
+static const int kvm_vti_max_exit_handlers =
+		ARRAY_SIZE(kvm_vti_exit_handlers);
+
+static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
+{
+}
+
+static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p_exit_data;
+
+ p_exit_data = kvm_get_exit_data(vcpu);
+ return p_exit_data->exit_reason;
+}
+
+/*
+ * The guest has exited. See if we can fix it or if we need userspace
+ * assistance.
+ */
+static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ u32 exit_reason = kvm_get_exit_reason(vcpu);
+ vcpu->arch.last_exit = exit_reason;
+
+ if (exit_reason < kvm_vti_max_exit_handlers
+ && kvm_vti_exit_handlers[exit_reason])
+ return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
+ else {
+ kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+ kvm_run->hw.hardware_exit_reason = exit_reason;
+ }
+ return 0;
+}
+
+static inline void vti_set_rr6(unsigned long rr6)
+{
+ ia64_set_rr(RR6, rr6);
+ ia64_srlz_i();
+}
+
+static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
+{
+ unsigned long pte;
+ struct kvm *kvm = vcpu->kvm;
+ int r;
+
+	/* Insert a translation register pair to map the VMM. */
+ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
+ r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+ if (r < 0)
+ goto out;
+ vcpu->arch.vmm_tr_slot = r;
+	/* Insert a translation register pair to map the VM's data. */
+ pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
+ r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
+ pte, KVM_VM_DATA_SHIFT);
+ if (r < 0)
+ goto out;
+ vcpu->arch.vm_tr_slot = r;
+ r = 0;
+out:
+ return r;
+
+}
+
+static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
+{
+
+ ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
+ ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
+
+}
+
+static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+
+ if (vcpu->arch.last_run_cpu != cpu ||
+ per_cpu(last_vcpu, cpu) != vcpu) {
+ per_cpu(last_vcpu, cpu) = vcpu;
+ vcpu->arch.last_run_cpu = cpu;
+ kvm_flush_tlb_all();
+ }
+
+ vcpu->arch.host_rr6 = ia64_get_rr(RR6);
+ vti_set_rr6(vcpu->arch.vmm_rr);
+ return kvm_insert_vmm_mapping(vcpu);
+}
+static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
+{
+ kvm_purge_vmm_mapping(vcpu);
+ vti_set_rr6(vcpu->arch.host_rr6);
+}
+
+static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ union context *host_ctx, *guest_ctx;
+ int r;
+
+	/* Get host and guest context pointers in the guest address space. */
+ host_ctx = kvm_get_host_context(vcpu);
+ guest_ctx = kvm_get_guest_context(vcpu);
+
+ r = kvm_vcpu_pre_transition(vcpu);
+ if (r < 0)
+ goto out;
+ kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
+ kvm_vcpu_post_transition(vcpu);
+ r = 0;
+out:
+ return r;
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ int r;
+
+again:
+ preempt_disable();
+
+ kvm_prepare_guest_switch(vcpu);
+ local_irq_disable();
+
+ if (signal_pending(current)) {
+ local_irq_enable();
+ preempt_enable();
+ r = -EINTR;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ goto out;
+ }
+
+ vcpu->guest_mode = 1;
+ kvm_guest_enter();
+
+ r = vti_vcpu_run(vcpu, kvm_run);
+ if (r < 0) {
+ local_irq_enable();
+ preempt_enable();
+ kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ goto out;
+ }
+
+ vcpu->arch.launched = 1;
+ vcpu->guest_mode = 0;
+ local_irq_enable();
+
+ /*
+ * We must have an instruction between local_irq_enable() and
+ * kvm_guest_exit(), so the timer interrupt isn't delayed by
+ * the interrupt shadow. The stat.exits increment will do nicely.
+ * But we need to prevent reordering, hence this barrier():
+ */
+ barrier();
+
+ kvm_guest_exit();
+
+ preempt_enable();
+
+ r = kvm_handle_exit(kvm_run, vcpu);
+
+ if (r > 0) {
+ if (!need_resched())
+ goto again;
+ }
+
+out:
+ if (r > 0) {
+ kvm_resched(vcpu);
+ goto again;
+ }
+
+ return r;
+}
+
+static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
+
+ if (!vcpu->mmio_is_write)
+ memcpy(&p->data, vcpu->mmio_data, 8);
+ p->state = STATE_IORESP_READY;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ int r;
+ sigset_t sigsaved;
+
+ vcpu_load(vcpu);
+
+ if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
+ kvm_vcpu_block(vcpu);
+ vcpu_put(vcpu);
+ return -EAGAIN;
+ }
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+ if (vcpu->mmio_needed) {
+ memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+ kvm_set_mmio_data(vcpu);
+ vcpu->mmio_read_completed = 1;
+ vcpu->mmio_needed = 0;
+ }
+ r = __vcpu_run(vcpu, kvm_run);
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+ vcpu_put(vcpu);
+ return r;
+}
+
+/*
+ * Allocate 16M memory for every vm to hold its specific data.
+ * Its memory map is defined in kvm_host.h.
+ */
+static struct kvm *kvm_alloc_kvm(void)
+{
+
+ struct kvm *kvm;
+ uint64_t vm_base;
+
+ vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
+
+ if (!vm_base)
+ return ERR_PTR(-ENOMEM);
+ printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
+
+ /* Zero all pages before use! */
+ memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
+
+ kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
+ kvm->arch.vm_base = vm_base;
+
+ return kvm;
+}
+
+struct kvm_io_range {
+ unsigned long start;
+ unsigned long size;
+ unsigned long type;
+};
+
+static const struct kvm_io_range io_ranges[] = {
+ {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
+ {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
+ {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
+ {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
+ {PIB_START, PIB_SIZE, GPFN_PIB},
+};
+
+static void kvm_build_io_pmt(struct kvm *kvm)
+{
+ unsigned long i, j;
+
+ /* Mark I/O ranges */
+ for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
+ i++) {
+ for (j = io_ranges[i].start;
+ j < io_ranges[i].start + io_ranges[i].size;
+ j += PAGE_SIZE)
+ kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
+ io_ranges[i].type, 0);
+ }
+
+}
+
+/* Use unused RIDs to virtualize the guest RIDs. */
+#define GUEST_PHYSICAL_RR0 0x1739
+#define GUEST_PHYSICAL_RR4 0x2739
+#define VMM_INIT_RR 0x1660
+
+static void kvm_init_vm(struct kvm *kvm)
+{
+ long vm_base;
+
+ BUG_ON(!kvm);
+
+ kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
+ kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
+ kvm->arch.vmm_init_rr = VMM_INIT_RR;
+
+ vm_base = kvm->arch.vm_base;
+ if (vm_base) {
+ kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
+ kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
+ kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
+ }
+
+	/*
+	 * Fill P2M entries for MMIO/IO ranges.
+	 */
+ kvm_build_io_pmt(kvm);
+
+}
+
+struct kvm *kvm_arch_create_vm(void)
+{
+ struct kvm *kvm = kvm_alloc_kvm();
+
+ if (IS_ERR(kvm))
+ return ERR_PTR(-ENOMEM);
+ kvm_init_vm(kvm);
+
+ return kvm;
+
+}
+
+static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
+ struct kvm_irqchip *chip)
+{
+ int r;
+
+ r = 0;
+ switch (chip->chip_id) {
+ case KVM_IRQCHIP_IOAPIC:
+ memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
+ sizeof(struct kvm_ioapic_state));
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ return r;
+}
+
+static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
+{
+ int r;
+
+ r = 0;
+ switch (chip->chip_id) {
+ case KVM_IRQCHIP_IOAPIC:
+ memcpy(ioapic_irqchip(kvm),
+ &chip->chip.ioapic,
+ sizeof(struct kvm_ioapic_state));
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ return r;
+}
+
+#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+ int r;
+
+ vcpu_load(vcpu);
+
+ for (i = 0; i < 16; i++) {
+ vpd->vgr[i] = regs->vpd.vgr[i];
+ vpd->vbgr[i] = regs->vpd.vbgr[i];
+ }
+ for (i = 0; i < 128; i++)
+ vpd->vcr[i] = regs->vpd.vcr[i];
+ vpd->vhpi = regs->vpd.vhpi;
+ vpd->vnat = regs->vpd.vnat;
+ vpd->vbnat = regs->vpd.vbnat;
+ vpd->vpsr = regs->vpd.vpsr;
+
+ vpd->vpr = regs->vpd.vpr;
+
+	r = -EFAULT;
+	if (copy_from_user(&vcpu->arch.guest, regs->saved_guest,
+				sizeof(union context)))
+		goto out;
+	if (copy_from_user(vcpu + 1, regs->saved_stack +
+				sizeof(struct kvm_vcpu),
+				IA64_STK_OFFSET - sizeof(struct kvm_vcpu)))
+		goto out;
+ vcpu->arch.exit_data =
+ ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;
+
+ RESTORE_REGS(mp_state);
+ RESTORE_REGS(vmm_rr);
+ memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
+ memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
+ RESTORE_REGS(itr_regions);
+ RESTORE_REGS(dtr_regions);
+ RESTORE_REGS(tc_regions);
+ RESTORE_REGS(irq_check);
+ RESTORE_REGS(itc_check);
+ RESTORE_REGS(timer_check);
+ RESTORE_REGS(timer_pending);
+ RESTORE_REGS(last_itc);
+ for (i = 0; i < 8; i++) {
+ vcpu->arch.vrr[i] = regs->vrr[i];
+ vcpu->arch.ibr[i] = regs->ibr[i];
+ vcpu->arch.dbr[i] = regs->dbr[i];
+ }
+ for (i = 0; i < 4; i++)
+ vcpu->arch.insvc[i] = regs->insvc[i];
+ RESTORE_REGS(xtp);
+ RESTORE_REGS(metaphysical_rr0);
+ RESTORE_REGS(metaphysical_rr4);
+ RESTORE_REGS(metaphysical_saved_rr0);
+ RESTORE_REGS(metaphysical_saved_rr4);
+ RESTORE_REGS(fp_psr);
+ RESTORE_REGS(saved_gp);
+
+ vcpu->arch.irq_new_pending = 1;
+ vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
+ set_bit(KVM_REQ_RESUME, &vcpu->requests);
+
+ vcpu_put(vcpu);
+ r = 0;
+out:
+ return r;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm *kvm = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ int r = -EINVAL;
+
+ switch (ioctl) {
+ case KVM_SET_MEMORY_REGION: {
+ struct kvm_memory_region kvm_mem;
+ struct kvm_userspace_memory_region kvm_userspace_mem;
+
+ r = -EFAULT;
+ if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
+ goto out;
+ kvm_userspace_mem.slot = kvm_mem.slot;
+ kvm_userspace_mem.flags = kvm_mem.flags;
+ kvm_userspace_mem.guest_phys_addr =
+ kvm_mem.guest_phys_addr;
+ kvm_userspace_mem.memory_size = kvm_mem.memory_size;
+ r = kvm_vm_ioctl_set_memory_region(kvm,
+ &kvm_userspace_mem, 0);
+ if (r)
+ goto out;
+ break;
+ }
+	case KVM_CREATE_IRQCHIP:
+		r = kvm_ioapic_init(kvm);
+ if (r)
+ goto out;
+ break;
+ case KVM_IRQ_LINE: {
+ struct kvm_irq_level irq_event;
+
+ r = -EFAULT;
+ if (copy_from_user(&irq_event, argp, sizeof irq_event))
+ goto out;
+ if (irqchip_in_kernel(kvm)) {
+ mutex_lock(&kvm->lock);
+ kvm_ioapic_set_irq(kvm->arch.vioapic,
+ irq_event.irq,
+ irq_event.level);
+ mutex_unlock(&kvm->lock);
+ r = 0;
+ }
+ break;
+ }
+ case KVM_GET_IRQCHIP: {
+ /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
+ struct kvm_irqchip chip;
+
+ r = -EFAULT;
+ if (copy_from_user(&chip, argp, sizeof chip))
+ goto out;
+ r = -ENXIO;
+ if (!irqchip_in_kernel(kvm))
+ goto out;
+ r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
+ if (r)
+ goto out;
+ r = -EFAULT;
+ if (copy_to_user(argp, &chip, sizeof chip))
+ goto out;
+ r = 0;
+ break;
+ }
+ case KVM_SET_IRQCHIP: {
+ /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
+ struct kvm_irqchip chip;
+
+ r = -EFAULT;
+ if (copy_from_user(&chip, argp, sizeof chip))
+ goto out;
+ r = -ENXIO;
+ if (!irqchip_in_kernel(kvm))
+ goto out;
+ r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
+ if (r)
+ goto out;
+ r = 0;
+ break;
+ }
+ default:
+ ;
+ }
+out:
+ return r;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -EINVAL;
+
+}
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+
+ return -EINVAL;
+}
+
+static int kvm_alloc_vmm_area(void)
+{
+ if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
+ kvm_vmm_base = __get_free_pages(GFP_KERNEL,
+ get_order(KVM_VMM_SIZE));
+ if (!kvm_vmm_base)
+ return -ENOMEM;
+
+ memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
+ kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
+
+ printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
+ kvm_vmm_base, kvm_vm_buffer);
+ }
+
+ return 0;
+}
+
+static void kvm_free_vmm_area(void)
+{
+ if (kvm_vmm_base) {
+		/* Zero this area before freeing it to avoid leaking stale data. */
+ memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
+ free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
+ kvm_vmm_base = 0;
+ kvm_vm_buffer = 0;
+ kvm_vsa_base = 0;
+ }
+}
+
+/*
+ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+ * cached on it. This is a no-op on ia64.
+ */
+void decache_vcpus_on_cpu(int cpu)
+{
+}
+
+static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
+static int vti_init_vpd(struct kvm_vcpu *vcpu)
+{
+ int i;
+ union cpuid3_t cpuid3;
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+ if (IS_ERR(vpd))
+ return PTR_ERR(vpd);
+
+ /* CPUID init */
+ for (i = 0; i < 5; i++)
+ vpd->vcpuid[i] = ia64_get_cpuid(i);
+
+ /* Limit the CPUID number to 5 */
+ cpuid3.value = vpd->vcpuid[3];
+ cpuid3.number = 4; /* 5 - 1 */
+ vpd->vcpuid[3] = cpuid3.value;
+
+ /*Set vac and vdc fields*/
+ vpd->vac.a_from_int_cr = 1;
+ vpd->vac.a_to_int_cr = 1;
+ vpd->vac.a_from_psr = 1;
+ vpd->vac.a_from_cpuid = 1;
+ vpd->vac.a_cover = 1;
+ vpd->vac.a_bsw = 1;
+ vpd->vac.a_int = 1;
+ vpd->vdc.d_vmsw = 1;
+
+ /*Set virtual buffer*/
+ vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
+
+ return 0;
+}
+
+static int vti_create_vp(struct kvm_vcpu *vcpu)
+{
+ long ret;
+ struct vpd *vpd = vcpu->arch.vpd;
+ unsigned long vmm_ivt;
+
+ vmm_ivt = kvm_vmm_info->vmm_ivt;
+
+ printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
+
+ ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
+
+ if (ret) {
+ printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void init_ptce_info(struct kvm_vcpu *vcpu)
+{
+ ia64_ptce_info_t ptce = {0};
+
+ ia64_get_ptce(&ptce);
+ vcpu->arch.ptce_base = ptce.base;
+ vcpu->arch.ptce_count[0] = ptce.count[0];
+ vcpu->arch.ptce_count[1] = ptce.count[1];
+ vcpu->arch.ptce_stride[0] = ptce.stride[0];
+ vcpu->arch.ptce_stride[1] = ptce.stride[1];
+}
+
+static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
+{
+ struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
+
+ if (hrtimer_cancel(p_ht))
+ hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS);
+}
+
+static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
+{
+ struct kvm_vcpu *vcpu;
+ wait_queue_head_t *q;
+
+ vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+ if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
+ goto out;
+
+ q = &vcpu->wq;
+ if (waitqueue_active(q)) {
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ wake_up_interruptible(q);
+ }
+out:
+ vcpu->arch.timer_check = 1;
+ return HRTIMER_NORESTART;
+}
+
+#define PALE_RESET_ENTRY 0x80000000ffffffb0UL
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *v;
+ int r;
+ int i;
+ long itc_offset;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ union context *p_ctx = &vcpu->arch.guest;
+ struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
+
+ /*Init vcpu context for first run.*/
+ if (IS_ERR(vmm_vcpu))
+ return PTR_ERR(vmm_vcpu);
+
+ if (vcpu->vcpu_id == 0) {
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+ /*Set entry address for first run.*/
+ regs->cr_iip = PALE_RESET_ENTRY;
+
+		/* Initialize the ITC offset for all vcpus. */
+ itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
+ for (i = 0; i < MAX_VCPU_NUM; i++) {
+ v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+ v->arch.itc_offset = itc_offset;
+ v->arch.last_itc = 0;
+ }
+ } else
+ vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
+
+ r = -ENOMEM;
+ vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
+ if (!vcpu->arch.apic)
+ goto out;
+ vcpu->arch.apic->vcpu = vcpu;
+
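+	/*
+	 * Build the initial VMM context: r12 is the initial stack pointer,
+	 * r13 points at the vcpu (the "current" convention), and the
+	 * psr/ar/cr values below set up the VMM execution environment.
+	 */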
+ p_ctx->gr[1] = 0;
+ p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
+ p_ctx->gr[13] = (unsigned long)vmm_vcpu;
+ p_ctx->psr = 0x1008522000UL;
+ p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
+ p_ctx->caller_unat = 0;
+ p_ctx->pr = 0x0;
+ p_ctx->ar[36] = 0x0; /*unat*/
+ p_ctx->ar[19] = 0x0; /*rnat*/
+ p_ctx->ar[18] = (unsigned long)vmm_vcpu +
+ ((sizeof(struct kvm_vcpu)+15) & ~15);
+ p_ctx->ar[64] = 0x0; /*pfs*/
+ p_ctx->cr[0] = 0x7e04UL;
+ p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
+ p_ctx->cr[8] = 0x3c;
+
+	/* Initialize the region registers. */
+ p_ctx->rr[0] = 0x30;
+ p_ctx->rr[1] = 0x30;
+ p_ctx->rr[2] = 0x30;
+ p_ctx->rr[3] = 0x30;
+ p_ctx->rr[4] = 0x30;
+ p_ctx->rr[5] = 0x30;
+ p_ctx->rr[7] = 0x30;
+
+	/* Initialize branch register 0. */
+ p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
+
+ vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
+ vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
+ vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
+
+ hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ vcpu->arch.hlt_timer.function = hlt_timer_fn;
+
+ vcpu->arch.last_run_cpu = -1;
+ vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
+ vcpu->arch.vsa_base = kvm_vsa_base;
+ vcpu->arch.__gp = kvm_vmm_gp;
+ vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
+ vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
+ vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
+ init_ptce_info(vcpu);
+
+ r = 0;
+out:
+ return r;
+}
+
+static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
+{
+ unsigned long psr;
+ int r;
+
+ local_irq_save(psr);
+ r = kvm_insert_vmm_mapping(vcpu);
+ if (r)
+ goto fail;
+ r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
+ if (r)
+ goto fail;
+
+ r = vti_init_vpd(vcpu);
+ if (r) {
+ printk(KERN_DEBUG"kvm: vpd init error!!\n");
+ goto uninit;
+ }
+
+ r = vti_create_vp(vcpu);
+ if (r)
+ goto uninit;
+
+ kvm_purge_vmm_mapping(vcpu);
+ local_irq_restore(psr);
+
+ return 0;
+uninit:
+ kvm_vcpu_uninit(vcpu);
+fail:
+ return r;
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ unsigned int id)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long vm_base = kvm->arch.vm_base;
+ int r;
+ int cpu;
+
+ r = -ENOMEM;
+ if (!vm_base) {
+ printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
+ goto fail;
+ }
+ vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
+ vcpu->kvm = kvm;
+
+ cpu = get_cpu();
+ vti_vcpu_load(vcpu, cpu);
+ r = vti_vcpu_setup(vcpu, id);
+ put_cpu();
+
+ if (r) {
+ printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
+ goto fail;
+ }
+
+ return vcpu;
+fail:
+ return ERR_PTR(r);
+}
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg)
+{
+ return -EINVAL;
+}
+
+static void free_kvm(struct kvm *kvm)
+{
+ unsigned long vm_base = kvm->arch.vm_base;
+
+ if (vm_base) {
+ memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
+ free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
+ }
+
+}
+
+static void kvm_release_vm_pages(struct kvm *kvm)
+{
+ struct kvm_memory_slot *memslot;
+ int i, j;
+ unsigned long base_gfn;
+
+ for (i = 0; i < kvm->nmemslots; i++) {
+ memslot = &kvm->memslots[i];
+ base_gfn = memslot->base_gfn;
+
+ for (j = 0; j < memslot->npages; j++) {
+ if (memslot->rmap[j])
+ put_page((struct page *)memslot->rmap[j]);
+ }
+ }
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ kfree(kvm->arch.vioapic);
+ kvm_release_vm_pages(kvm);
+ kvm_free_physmem(kvm);
+ free_kvm(kvm);
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ if (cpu != vcpu->cpu) {
+ vcpu->cpu = cpu;
+ if (vcpu->arch.ht_active)
+ kvm_migrate_hlt_timer(vcpu);
+ }
+}
+
+#define SAVE_REGS(_x) regs->_x = vcpu->arch._x
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+ int r;
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+ vcpu_load(vcpu);
+
+ for (i = 0; i < 16; i++) {
+ regs->vpd.vgr[i] = vpd->vgr[i];
+ regs->vpd.vbgr[i] = vpd->vbgr[i];
+ }
+ for (i = 0; i < 128; i++)
+ regs->vpd.vcr[i] = vpd->vcr[i];
+ regs->vpd.vhpi = vpd->vhpi;
+ regs->vpd.vnat = vpd->vnat;
+ regs->vpd.vbnat = vpd->vbnat;
+ regs->vpd.vpsr = vpd->vpsr;
+ regs->vpd.vpr = vpd->vpr;
+
+	r = -EFAULT;
+	if (copy_to_user(regs->saved_guest, &vcpu->arch.guest,
+				sizeof(union context)))
+		goto out;
+	if (copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET))
+		goto out;
+ SAVE_REGS(mp_state);
+ SAVE_REGS(vmm_rr);
+ memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
+ memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
+ SAVE_REGS(itr_regions);
+ SAVE_REGS(dtr_regions);
+ SAVE_REGS(tc_regions);
+ SAVE_REGS(irq_check);
+ SAVE_REGS(itc_check);
+ SAVE_REGS(timer_check);
+ SAVE_REGS(timer_pending);
+ SAVE_REGS(last_itc);
+ for (i = 0; i < 8; i++) {
+ regs->vrr[i] = vcpu->arch.vrr[i];
+ regs->ibr[i] = vcpu->arch.ibr[i];
+ regs->dbr[i] = vcpu->arch.dbr[i];
+ }
+ for (i = 0; i < 4; i++)
+ regs->insvc[i] = vcpu->arch.insvc[i];
+ regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
+ SAVE_REGS(xtp);
+ SAVE_REGS(metaphysical_rr0);
+ SAVE_REGS(metaphysical_rr4);
+ SAVE_REGS(metaphysical_saved_rr0);
+ SAVE_REGS(metaphysical_saved_rr4);
+ SAVE_REGS(fp_psr);
+ SAVE_REGS(saved_gp);
+ vcpu_put(vcpu);
+ r = 0;
+out:
+ return r;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+
+ hrtimer_cancel(&vcpu->arch.hlt_timer);
+ kfree(vcpu->arch.apic);
+}
+
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ unsigned long i;
+ struct page *page;
+ int npages = mem->memory_size >> PAGE_SHIFT;
+ struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+ unsigned long base_gfn = memslot->base_gfn;
+
+ for (i = 0; i < npages; i++) {
+ page = gfn_to_page(kvm, base_gfn + i);
+ kvm_set_pmt_entry(kvm, base_gfn + i,
+ page_to_pfn(page) << PAGE_SHIFT,
+ _PAGE_AR_RWX|_PAGE_MA_WB);
+ memslot->rmap[i] = (unsigned long)page;
+ }
+
+ return 0;
+}
+
+
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_uninit(vcpu);
+}
+
+static int vti_cpu_has_kvm_support(void)
+{
+ long avail = 1, status = 1, control = 1;
+ long ret;
+
+ ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
+ if (ret)
+ goto out;
+
+ if (!(avail & PAL_PROC_VM_BIT))
+ goto out;
+
+ printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
+
+ ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
+ if (ret)
+ goto out;
+ printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
+
+ if (!(vp_env_info & VP_OPCODE)) {
+ printk(KERN_WARNING"kvm: No opcode ability on hardware, "
+ "vm_env_info:0x%lx\n", vp_env_info);
+ }
+
+ return 1;
+out:
+ return 0;
+}
+
+static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
+ struct module *module)
+{
+ unsigned long module_base;
+ unsigned long vmm_size;
+
+ unsigned long vmm_offset, func_offset, fdesc_offset;
+ struct fdesc *p_fdesc;
+
+ BUG_ON(!module);
+
+ if (!kvm_vmm_base) {
+		printk(KERN_ERR "kvm: kvm area hasn't been initialized yet!\n");
+ return -EFAULT;
+ }
+
+	/* Calculate the new position of the relocated VMM module. */
+ module_base = (unsigned long)module->module_core;
+ vmm_size = module->core_size;
+ if (unlikely(vmm_size > KVM_VMM_SIZE))
+ return -EFAULT;
+
+ memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
+ kvm_flush_icache(kvm_vmm_base, vmm_size);
+
+	/* Recalculate kvm_vmm_info based on the relocated VMM. */
+ vmm_offset = vmm_info->vmm_ivt - module_base;
+ kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
+ printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
+ kvm_vmm_info->vmm_ivt);
+
+ fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
+ kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
+ fdesc_offset);
+ func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
+ p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
+ p_fdesc->ip = KVM_VMM_BASE + func_offset;
+ p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
+
+ printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
+ KVM_VMM_BASE+func_offset);
+
+ fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
+ kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
+ fdesc_offset);
+ func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
+ p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
+ p_fdesc->ip = KVM_VMM_BASE + func_offset;
+ p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
+
+ kvm_vmm_gp = p_fdesc->gp;
+
+ printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
+ kvm_vmm_info->vmm_entry);
+ printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
+ KVM_VMM_BASE + func_offset);
+
+ return 0;
+}
+
+int kvm_arch_init(void *opaque)
+{
+ int r;
+ struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
+
+ if (!vti_cpu_has_kvm_support()) {
+ printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
+ r = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (kvm_vmm_info) {
+ printk(KERN_ERR "kvm: Already loaded VMM module!\n");
+ r = -EEXIST;
+ goto out;
+ }
+
+ r = -ENOMEM;
+ kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
+ if (!kvm_vmm_info)
+ goto out;
+
+ if (kvm_alloc_vmm_area())
+ goto out_free0;
+
+ r = kvm_relocate_vmm(vmm_info, vmm_info->module);
+ if (r)
+ goto out_free1;
+
+ return 0;
+
+out_free1:
+ kvm_free_vmm_area();
+out_free0:
+ kfree(kvm_vmm_info);
+out:
+ return r;
+}
+
+void kvm_arch_exit(void)
+{
+ kvm_free_vmm_area();
+ kfree(kvm_vmm_info);
+ kvm_vmm_info = NULL;
+}
+
+static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ struct kvm_memory_slot *memslot;
+ int r, i;
+ long n, base;
+ unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
+ + KVM_MEM_DIRTY_LOG_OFS);
+
+ r = -EINVAL;
+ if (log->slot >= KVM_MEMORY_SLOTS)
+ goto out;
+
+ memslot = &kvm->memslots[log->slot];
+ r = -ENOENT;
+ if (!memslot->dirty_bitmap)
+ goto out;
+
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ base = memslot->base_gfn / BITS_PER_LONG;
+
+ for (i = 0; i < n/sizeof(long); ++i) {
+ memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
+ dirty_bitmap[base + i] = 0;
+ }
+ r = 0;
+out:
+ return r;
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ int r;
+ int n;
+ struct kvm_memory_slot *memslot;
+ int is_dirty = 0;
+
+ spin_lock(&kvm->arch.dirty_log_lock);
+
+ r = kvm_ia64_sync_dirty_log(kvm, log);
+ if (r)
+ goto out;
+
+ r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ if (r)
+ goto out;
+
+ /* If nothing is dirty, don't bother messing with page tables. */
+ if (is_dirty) {
+ kvm_flush_remote_tlbs(kvm);
+ memslot = &kvm->memslots[log->slot];
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+ r = 0;
+out:
+ spin_unlock(&kvm->arch.dirty_log_lock);
+ return r;
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+static void vcpu_kick_intr(void *info)
+{
+#ifdef DEBUG
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
+	printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
+#endif
+}
+
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ int ipi_pcpu = vcpu->cpu;
+
+ if (waitqueue_active(&vcpu->wq))
+ wake_up_interruptible(&vcpu->wq);
+
+ if (vcpu->guest_mode)
+ smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+}
+
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
+{
+
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+ if (!test_and_set_bit(vec, &vpd->irr[0])) {
+ vcpu->arch.irq_new_pending = 1;
+ if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
+ kvm_vcpu_kick(vcpu);
+ else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ if (waitqueue_active(&vcpu->wq))
+ wake_up_interruptible(&vcpu->wq);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
+{
+ return apic->vcpu->vcpu_id == dest;
+}
+
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
+ unsigned long bitmap)
+{
+ struct kvm_vcpu *lvcpu = kvm->vcpus[0];
+ int i;
+
+ for (i = 1; i < KVM_MAX_VCPUS; i++) {
+ if (!kvm->vcpus[i])
+ continue;
+ if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
+ lvcpu = kvm->vcpus[i];
+ }
+
+ return lvcpu;
+}
+
+static int find_highest_bits(int *dat)
+{
+ u32 bits, bitnum;
+ int i;
+
+ /* loop for all 256 bits */
+ for (i = 7; i >= 0 ; i--) {
+ bits = dat[i];
+ if (bits) {
+ bitnum = fls(bits);
+ return i * 32 + bitnum - 1;
+ }
+ }
+
+ return -1;
+}
+
+int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
+{
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+ if (vpd->irr[0] & (1UL << NMI_VECTOR))
+ return NMI_VECTOR;
+ if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
+ return ExtINT_VECTOR;
+
+ return find_highest_bits((int *)&vpd->irr[0]);
+}
+
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ if (kvm_highest_pending_irq(vcpu) != -1)
+ return 1;
+ return 0;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
new file mode 100644
index 00000000000..091f936c448
--- /dev/null
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -0,0 +1,500 @@
+/*
+ * PAL/SAL call delegation
+ *
+ * Copyright (c) 2004 Li Susie <susie.li@intel.com>
+ * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
+ * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/smp.h>
+
+#include "vti.h"
+#include "misc.h"
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/tlb.h>
+
+/*
+ * Handy macros to make sure that the PAL return values start out
+ * as something meaningful.
+ */
+#define INIT_PAL_STATUS_UNIMPLEMENTED(x) \
+ { \
+ x.status = PAL_STATUS_UNIMPLEMENTED; \
+ x.v0 = 0; \
+ x.v1 = 0; \
+ x.v2 = 0; \
+ }
+
+#define INIT_PAL_STATUS_SUCCESS(x) \
+ { \
+ x.status = PAL_STATUS_SUCCESS; \
+ x.v0 = 0; \
+ x.v1 = 0; \
+ x.v2 = 0; \
+ }
+
+static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu,
+ u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31)
+{
+ struct exit_ctl_data *p;
+
+ if (vcpu) {
+ p = &vcpu->arch.exit_data;
+ if (p->exit_reason == EXIT_REASON_PAL_CALL) {
+ *gr28 = p->u.pal_data.gr28;
+ *gr29 = p->u.pal_data.gr29;
+ *gr30 = p->u.pal_data.gr30;
+ *gr31 = p->u.pal_data.gr31;
+ return;
+ }
+ }
+ printk(KERN_DEBUG "kvm: Failed to get vcpu pal data!\n");
+}
+
+static void set_pal_result(struct kvm_vcpu *vcpu,
+ struct ia64_pal_retval result)
+{
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+ if (p->exit_reason == EXIT_REASON_PAL_CALL)
+ p->u.pal_data.ret = result;
+ else
+ INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret);
+}
+
+static void set_sal_result(struct kvm_vcpu *vcpu,
+ struct sal_ret_values result)
+{
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+ if (p && p->exit_reason == EXIT_REASON_SAL_CALL) {
+ p->u.sal_data.ret = result;
+ return;
+ }
+ printk(KERN_WARNING "kvm: Failed to set sal result!\n");
+}
+
+struct cache_flush_args {
+ u64 cache_type;
+ u64 operation;
+ u64 progress;
+ long status;
+};
+
+cpumask_t cpu_cache_coherent_map;
+
+static void remote_pal_cache_flush(void *data)
+{
+ struct cache_flush_args *args = data;
+ long status;
+ u64 progress = args->progress;
+
+ status = ia64_pal_cache_flush(args->cache_type, args->operation,
+ &progress, NULL);
+ if (status != 0)
+ args->status = status;
+}
+
+static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
+{
+ u64 gr28, gr29, gr30, gr31;
+ struct ia64_pal_retval result = {0, 0, 0, 0};
+ struct cache_flush_args args = {0, 0, 0, 0};
+ long psr;
+
+ gr28 = gr29 = gr30 = gr31 = 0;
+ kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);
+
+ if (gr31 != 0)
+ printk(KERN_ERR "vcpu:%p called cache_flush with invalid gr31!\n",
+ vcpu);
+
+ /* Always call Host Pal in int=1 */
+ gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
+ args.cache_type = gr29;
+ args.operation = gr30;
+ smp_call_function(remote_pal_cache_flush,
+ (void *)&args, 1, 1);
+ if (args.status != 0)
+ printk(KERN_ERR "pal_cache_flush error! status:0x%lx\n",
+ args.status);
+ /*
+ * Call Host PAL cache flush
+ * Clear psr.ic when call PAL_CACHE_FLUSH
+ */
+ local_irq_save(psr);
+ result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
+ &result.v0);
+ local_irq_restore(psr);
+ if (result.status != 0)
+ printk(KERN_ERR "vcpu:%p crashed due to cache_flush err:%ld, "
+ "in1:%lx, in2:%lx\n",
+ vcpu, result.status, gr29, gr30);
+
+#if 0
+ if (gr29 == PAL_CACHE_TYPE_COHERENT) {
+ cpus_setall(vcpu->arch.cache_coherent_map);
+ cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
+ cpus_setall(cpu_cache_coherent_map);
+ cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
+ }
+#endif
+ return result;
+}
+
+struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result;
+
+ PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0);
+ return result;
+}
+
+static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result;
+
+ PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0);
+
+ /*
+ * PAL_FREQ_BASE may not be implemented in some platforms,
+ * call SAL instead.
+ */
+ if (result.v0 == 0) {
+ result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+ &result.v0,
+ &result.v1);
+ result.v2 = 0;
+ }
+
+ return result;
+}
+
+static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result;
+
+ PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
+ return result;
+}
+
+static struct ia64_pal_retval pal_logical_to_physical(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result;
+
+ INIT_PAL_STATUS_UNIMPLEMENTED(result);
+ return result;
+}
+
+static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result;
+
+ INIT_PAL_STATUS_SUCCESS(result);
+ return result;
+}
+
+static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result = {0, 0, 0, 0};
+ unsigned long in0, in1, in2, in3;
+
+ kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+ result.status = ia64_pal_proc_get_features(&result.v0, &result.v1,
+ &result.v2, in2);
+
+ return result;
+}
+
+static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
+{
+ pal_cache_config_info_t ci;
+ long status;
+ unsigned long in0, in1, in2, in3, r9, r10;
+
+ kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+ status = ia64_pal_cache_config_info(in1, in2, &ci);
+ r9 = ci.pcci_info_1.pcci1_data;
+ r10 = ci.pcci_info_2.pcci2_data;
+ return ((struct ia64_pal_retval){status, r9, r10, 0});
+}
+
+#define GUEST_IMPL_VA_MSB 59
+#define GUEST_RID_BITS 18
+
+static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
+{
+ pal_vm_info_1_u_t vminfo1;
+ pal_vm_info_2_u_t vminfo2;
+ struct ia64_pal_retval result;
+
+ PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
+ if (!result.status) {
+ vminfo1.pvi1_val = result.v0;
+ vminfo1.pal_vm_info_1_s.max_itr_entry = 8;
+ vminfo1.pal_vm_info_1_s.max_dtr_entry = 8;
+ result.v0 = vminfo1.pvi1_val;
+ vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
+ vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS;
+ result.v1 = vminfo2.pvi2_val;
+ }
+
+ return result;
+}
+
+static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result;
+
+ INIT_PAL_STATUS_UNIMPLEMENTED(result);
+
+ return result;
+}
+
+static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
+{
+ u64 index = 0;
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+ if (p && (p->exit_reason == EXIT_REASON_PAL_CALL))
+ index = p->u.pal_data.gr28;
+
+ return index;
+}
+
+int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ u64 gr28;
+ struct ia64_pal_retval result;
+ int ret = 1;
+
+ gr28 = kvm_get_pal_call_index(vcpu);
+ /*printk("pal_call index:%lx\n",gr28);*/
+ switch (gr28) {
+ case PAL_CACHE_FLUSH:
+ result = pal_cache_flush(vcpu);
+ break;
+ case PAL_CACHE_SUMMARY:
+ result = pal_cache_summary(vcpu);
+ break;
+ case PAL_HALT_LIGHT:
+ vcpu->arch.timer_pending = 1;
+ INIT_PAL_STATUS_SUCCESS(result);
+ if (kvm_highest_pending_irq(vcpu) == -1)
+ ret = kvm_emulate_halt(vcpu);
+ break;
+
+ case PAL_FREQ_RATIOS:
+ result = pal_freq_ratios(vcpu);
+ break;
+
+ case PAL_FREQ_BASE:
+ result = pal_freq_base(vcpu);
+ break;
+
+ case PAL_LOGICAL_TO_PHYSICAL:
+ result = pal_logical_to_physical(vcpu);
+ break;
+
+ case PAL_VM_SUMMARY:
+ result = pal_vm_summary(vcpu);
+ break;
+
+ case PAL_VM_INFO:
+ result = pal_vm_info(vcpu);
+ break;
+ case PAL_PLATFORM_ADDR:
+ result = pal_platform_addr(vcpu);
+ break;
+ case PAL_CACHE_INFO:
+ result = pal_cache_info(vcpu);
+ break;
+ case PAL_PTCE_INFO:
+ INIT_PAL_STATUS_SUCCESS(result);
+ result.v1 = (1L << 32) | 1L;
+ break;
+ case PAL_VM_PAGE_SIZE:
+ result.status = ia64_pal_vm_page_size(&result.v0,
+ &result.v1);
+ break;
+ case PAL_RSE_INFO:
+ result.status = ia64_pal_rse_info(&result.v0,
+ (pal_hints_u_t *)&result.v1);
+ break;
+ case PAL_PROC_GET_FEATURES:
+ result = pal_proc_get_features(vcpu);
+ break;
+ case PAL_DEBUG_INFO:
+ result.status = ia64_pal_debug_info(&result.v0,
+ &result.v1);
+ break;
+ case PAL_VERSION:
+ result.status = ia64_pal_version(
+ (pal_version_u_t *)&result.v0,
+ (pal_version_u_t *)&result.v1);
+
+ break;
+ case PAL_FIXED_ADDR:
+ result.status = PAL_STATUS_SUCCESS;
+ result.v0 = vcpu->vcpu_id;
+ break;
+ default:
+ INIT_PAL_STATUS_UNIMPLEMENTED(result);
+ printk(KERN_WARNING"kvm: Unsupported pal call,"
+ " index:0x%lx\n", gr28);
+ }
+ set_pal_result(vcpu, result);
+ return ret;
+}
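+
+/*
+ * Flow sketch (illustrative): a guest PAL call with gr28 == PAL_FREQ_BASE
+ * leaves the guest with EXIT_REASON_PAL_CALL recorded in exit_data;
+ * kvm_pal_emul() above then dispatches to pal_freq_base() on the host
+ * and stores the resulting ia64_pal_retval back through set_pal_result()
+ * for the guest to consume when it resumes.
+ */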
+
+static struct sal_ret_values sal_emulator(struct kvm *kvm,
+ long index, unsigned long in1,
+ unsigned long in2, unsigned long in3,
+ unsigned long in4, unsigned long in5,
+ unsigned long in6, unsigned long in7)
+{
+ unsigned long r9 = 0;
+ unsigned long r10 = 0;
+ long r11 = 0;
+ long status;
+
+ status = 0;
+ switch (index) {
+ case SAL_FREQ_BASE:
+ status = ia64_sal_freq_base(in1, &r9, &r10);
+ break;
+ case SAL_PCI_CONFIG_READ:
+ printk(KERN_WARNING "kvm: Not allowed to call here!"
+ " SAL_PCI_CONFIG_READ\n");
+ break;
+ case SAL_PCI_CONFIG_WRITE:
+ printk(KERN_WARNING "kvm: Not allowed to call here!"
+ " SAL_PCI_CONFIG_WRITE\n");
+ break;
+ case SAL_SET_VECTORS:
+ if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
+ if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
+ status = -2;
+ } else {
+ kvm->arch.rdv_sal_data.boot_ip = in2;
+ kvm->arch.rdv_sal_data.boot_gp = in3;
+ }
+ printk("Rendvous called! iip:%lx\n\n", in2);
+ } else
+ printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu."
+ "ignored...\n", in1);
+ break;
+ case SAL_GET_STATE_INFO:
+ /* No more info. */
+ status = -5;
+ r9 = 0;
+ break;
+ case SAL_GET_STATE_INFO_SIZE:
+ /* Return a dummy size. */
+ status = 0;
+ r9 = 128;
+ break;
+ case SAL_CLEAR_STATE_INFO:
+ /* Noop. */
+ break;
+ case SAL_MC_RENDEZ:
+ printk(KERN_WARNING
+ "kvm: called SAL_MC_RENDEZ. ignored...\n");
+ break;
+ case SAL_MC_SET_PARAMS:
+ printk(KERN_WARNING
+ "kvm: called SAL_MC_SET_PARAMS.ignored!\n");
+ break;
+ case SAL_CACHE_FLUSH:
+ if (1) {
+ /*
+ * Flush using SAL. This method is faster, but it
+ * has a side effect on other vcpus running on
+ * this cpu.
+ */
+ status = ia64_sal_cache_flush(in1);
+ } else {
+ /*
+ * Maybe a method without the side effect
+ * needs to be implemented!
+ */
+ status = 0;
+ }
+ break;
+ case SAL_CACHE_INIT:
+ printk(KERN_WARNING
+ "kvm: called SAL_CACHE_INIT. ignored...\n");
+ break;
+ case SAL_UPDATE_PAL:
+ printk(KERN_WARNING
+ "kvm: CALLED SAL_UPDATE_PAL. ignored...\n");
+ break;
+ default:
+ printk(KERN_WARNING"kvm: called SAL_CALL with unknown index."
+ " index:%ld\n", index);
+ status = -1;
+ break;
+ }
+ return ((struct sal_ret_values) {status, r9, r10, r11});
+}
+
+static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
+ u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7)
+{
+ struct exit_ctl_data *p;
+
+ p = kvm_get_exit_data(vcpu);
+
+ if (p && p->exit_reason == EXIT_REASON_SAL_CALL) {
+ *in0 = p->u.sal_data.in0;
+ *in1 = p->u.sal_data.in1;
+ *in2 = p->u.sal_data.in2;
+ *in3 = p->u.sal_data.in3;
+ *in4 = p->u.sal_data.in4;
+ *in5 = p->u.sal_data.in5;
+ *in6 = p->u.sal_data.in6;
+ *in7 = p->u.sal_data.in7;
+ return;
+ }
+ *in0 = 0;
+}
+
+void kvm_sal_emul(struct kvm_vcpu *vcpu)
+{
+ struct sal_ret_values result;
+ u64 index, in1, in2, in3, in4, in5, in6, in7;
+
+ kvm_get_sal_call_data(vcpu, &index, &in1, &in2,
+ &in3, &in4, &in5, &in6, &in7);
+ result = sal_emulator(vcpu->kvm, index, in1, in2, in3,
+ in4, in5, in6, in7);
+ set_sal_result(vcpu, result);
+}
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
new file mode 100644
index 00000000000..13980d9b8bc
--- /dev/null
+++ b/arch/ia64/kvm/kvm_minstate.h
@@ -0,0 +1,273 @@
+/*
+ * kvm_minstate.h: min save macros
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+
+#include <asm/asmmacro.h>
+#include <asm/types.h>
+#include <asm/kregs.h>
+#include "asm-offsets.h"
+
+#define KVM_MINSTATE_START_SAVE_MIN \
+ mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
+ ;; \
+ mov.m r28 = ar.rnat; \
+ addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
+ ;; \
+ lfetch.fault.excl.nt1 [r22]; \
+ addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
+ mov r23 = ar.bspstore; /* save ar.bspstore */ \
+ ;; \
+ mov ar.bspstore = r22; /* switch to kernel RBS */\
+ ;; \
+ mov r18 = ar.bsp; \
+ mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
+
+
+
+#define KVM_MINSTATE_END_SAVE_MIN \
+ bsw.1; /* switch back to bank 1 (must be last in insn group) */\
+ ;;
+
+
+#define PAL_VSA_SYNC_READ \
+ /* begin to call pal vps sync_read */ \
+ add r25 = VMM_VPD_BASE_OFFSET, r21; \
+ adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21; /* entry point */ \
+ ;; \
+ ld8 r25 = [r25]; /* read vpd base */ \
+ ld8 r20 = [r20]; \
+ ;; \
+ add r20 = PAL_VPS_SYNC_READ,r20; \
+ ;; \
+{ .mii; \
+ nop 0x0; \
+ mov r24 = ip; \
+ mov b0 = r20; \
+ ;; \
+}; \
+{ .mmb; \
+ add r24 = 0x20, r24; \
+ nop 0x0; \
+ br.cond.sptk b0; /* call the service */ \
+ ;; \
+};
+
+
+
+#define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
+
+/*
+ * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
+ * the minimum state necessary that allows us to turn psr.ic back
+ * on.
+ *
+ * Assumed state upon entry:
+ * psr.ic: off
+ * r31: contains saved predicates (pr)
+ *
+ * Upon exit, the state is as follows:
+ * psr.ic: off
+ * r2 = points to &pt_regs.r16
+ * r8 = contents of ar.ccv
+ * r9 = contents of ar.csd
+ * r10 = contents of ar.ssd
+ * r11 = FPSR_DEFAULT
+ * r12 = kernel sp (kernel virtual address)
+ * r13 = points to current task_struct (kernel virtual address)
+ * p15 = TRUE if psr.i is set in cr.ipsr
+ * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
+ * preserved
+ *
+ * Note that psr.ic is NOT turned on by this macro. This is so that
+ * we can pass interruption state as arguments to a handler.
+ */
+
+
+#define PT(f) (VMM_PT_REGS_##f##_OFFSET)
+
+#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
+ KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
+ mov r27 = ar.rsc; /* M */ \
+ mov r20 = r1; /* A */ \
+ mov r25 = ar.unat; /* M */ \
+ mov r29 = cr.ipsr; /* M */ \
+ mov r26 = ar.pfs; /* I */ \
+ mov r18 = cr.isr; \
+ COVER; /* B;; (or nothing) */ \
+ ;; \
+ tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \
+ mov r1 = r16; \
+/* mov r21=r16; */ \
+ /* switch from user to kernel RBS: */ \
+ ;; \
+ invala; /* M */ \
+ SAVE_IFS; \
+ ;; \
+ KVM_MINSTATE_START_SAVE_MIN \
+ adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \
+ adds r16 = PT(CR_IPSR),r1; \
+ ;; \
+ lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
+ st8 [r16] = r29; /* save cr.ipsr */ \
+ ;; \
+ lfetch.fault.excl.nt1 [r17]; \
+ tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \
+ mov r29 = b0 \
+ ;; \
+ adds r16 = PT(R8),r1; /* initialize first base pointer */\
+ adds r17 = PT(R9),r1; /* initialize second base pointer */\
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r8,16; \
+.mem.offset 8,0; st8.spill [r17] = r9,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r10,24; \
+.mem.offset 8,0; st8.spill [r17] = r11,24; \
+ ;; \
+ mov r9 = cr.iip; /* M */ \
+ mov r10 = ar.fpsr; /* M */ \
+ ;; \
+ st8 [r16] = r9,16; /* save cr.iip */ \
+ st8 [r17] = r30,16; /* save cr.ifs */ \
+ sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \
+ ;; \
+ st8 [r16] = r25,16; /* save ar.unat */ \
+ st8 [r17] = r26,16; /* save ar.pfs */ \
+ shl r18 = r18,16; /* calc ar.rsc used for "loadrs" */\
+ ;; \
+ st8 [r16] = r27,16; /* save ar.rsc */ \
+ st8 [r17] = r28,16; /* save ar.rnat */ \
+ ;; /* avoid RAW on r16 & r17 */ \
+ st8 [r16] = r23,16; /* save ar.bspstore */ \
+ st8 [r17] = r31,16; /* save predicates */ \
+ ;; \
+ st8 [r16] = r29,16; /* save b0 */ \
+ st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
+.mem.offset 8,0; st8.spill [r17] = r12,16; \
+ adds r12 = -16,r1; /* switch to kernel memory stack */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r13,16; \
+.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\
+ mov r13 = r21; /* establish `current' */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r15,16; \
+.mem.offset 8,0; st8.spill [r17] = r14,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r16] = r2,16; \
+.mem.offset 8,0; st8.spill [r17] = r3,16; \
+ adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \
+ ;; \
+ adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \
+ adds r17 = VMM_VCPU_ISR_OFFSET,r13; \
+ mov r26 = cr.iipa; \
+ mov r27 = cr.isr; \
+ ;; \
+ st8 [r16] = r26; \
+ st8 [r17] = r27; \
+ ;; \
+ EXTRA; \
+ mov r8 = ar.ccv; \
+ mov r9 = ar.csd; \
+ mov r10 = ar.ssd; \
+ movl r11 = FPSR_DEFAULT; /* L-unit */ \
+ adds r17 = VMM_VCPU_GP_OFFSET,r13; \
+ ;; \
+ ld8 r1 = [r17];/* establish kernel global pointer */ \
+ ;; \
+ PAL_VSA_SYNC_READ \
+ KVM_MINSTATE_END_SAVE_MIN
+
+/*
+ * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
+ *
+ * Assumed state upon entry:
+ * psr.ic: on
+ * r2: points to &pt_regs.f6
+ * r3: points to &pt_regs.f7
+ * r8: contents of ar.ccv
+ * r9: contents of ar.csd
+ * r10: contents of ar.ssd
+ * r11: FPSR_DEFAULT
+ *
+ * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
+ */
+#define KVM_SAVE_REST \
+.mem.offset 0,0; st8.spill [r2] = r16,16; \
+.mem.offset 8,0; st8.spill [r3] = r17,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r18,16; \
+.mem.offset 8,0; st8.spill [r3] = r19,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r20,16; \
+.mem.offset 8,0; st8.spill [r3] = r21,16; \
+ mov r18=b6; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r22,16; \
+.mem.offset 8,0; st8.spill [r3] = r23,16; \
+ mov r19 = b7; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r24,16; \
+.mem.offset 8,0; st8.spill [r3] = r25,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r26,16; \
+.mem.offset 8,0; st8.spill [r3] = r27,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r28,16; \
+.mem.offset 8,0; st8.spill [r3] = r29,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r30,16; \
+.mem.offset 8,0; st8.spill [r3] = r31,32; \
+ ;; \
+ mov ar.fpsr = r11; \
+ st8 [r2] = r8,8; \
+ adds r24 = PT(B6)-PT(F7),r3; \
+ adds r25 = PT(B7)-PT(F7),r3; \
+ ;; \
+ st8 [r24] = r18,16; /* b6 */ \
+ st8 [r25] = r19,16; /* b7 */ \
+ adds r2 = PT(R4)-PT(F6),r2; \
+ adds r3 = PT(R5)-PT(F7),r3; \
+ ;; \
+ st8 [r24] = r9; /* ar.csd */ \
+ st8 [r25] = r10; /* ar.ssd */ \
+ ;; \
+ mov r18 = ar.unat; \
+ adds r19 = PT(EML_UNAT)-PT(R4),r2; \
+ ;; \
+ st8 [r19] = r18; /* eml_unat */ \
+
+
+#define KVM_SAVE_EXTRA \
+.mem.offset 0,0; st8.spill [r2] = r4,16; \
+.mem.offset 8,0; st8.spill [r3] = r5,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2] = r6,16; \
+.mem.offset 8,0; st8.spill [r3] = r7; \
+ ;; \
+ mov r26 = ar.unat; \
+ ;; \
+ st8 [r2] = r26;/* eml_unat */ \
+
+#define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
+#define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
+#define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, )
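+
+/*
+ * Illustrative use (hypothetical skeleton, not part of this header): an
+ * interruption handler typically starts with
+ *
+ * KVM_SAVE_MIN_WITH_COVER
+ * ;;
+ * (turn psr.ic back on, point r2/r3 at the pt_regs save area)
+ * KVM_SAVE_REST
+ *
+ * leaving a complete struct kvm_pt_regs frame on the VMM stack.
+ */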
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
new file mode 100644
index 00000000000..6d6cbcb1489
--- /dev/null
+++ b/arch/ia64/kvm/lapic.h
@@ -0,0 +1,25 @@
+#ifndef __KVM_IA64_LAPIC_H
+#define __KVM_IA64_LAPIC_H
+
+#include <linux/kvm_host.h>
+
+/*
+ * vlsapic
+ */
+struct kvm_lapic {
+ struct kvm_vcpu *vcpu;
+ uint64_t insvc[4];
+ uint64_t vhpi;
+ uint8_t xtp;
+ uint8_t pal_init_pending;
+ uint8_t pad[2];
+};
+
+int kvm_create_lapic(struct kvm_vcpu *vcpu);
+void kvm_free_lapic(struct kvm_vcpu *vcpu);
+
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
+
+#endif
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
new file mode 100644
index 00000000000..e585c460734
--- /dev/null
+++ b/arch/ia64/kvm/misc.h
@@ -0,0 +1,93 @@
+#ifndef __KVM_IA64_MISC_H
+#define __KVM_IA64_MISC_H
+
+#include <linux/kvm_host.h>
+/*
+ * misc.h
+ * Copyright (C) 2007, Intel Corporation.
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+/*
+ * Return p2m base address at host side!
+ */
+static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
+{
+ return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS);
+}
+
+static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
+ u64 paddr, u64 mem_flags)
+{
+ uint64_t *pmt_base = kvm_host_get_pmt(kvm);
+ unsigned long pte;
+
+ pte = PAGE_ALIGN(paddr) | mem_flags;
+ pmt_base[gfn] = pte;
+}
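+
+/*
+ * Illustrative wrapper (hypothetical; the flag choice is an assumption,
+ * not mandated by this helper): map a guest frame to a host page as
+ * cacheable, fully accessible memory.
+ */
+static inline void kvm_set_ram_pmt_entry(struct kvm *kvm, gfn_t gfn,
+ struct page *page)
+{
+ kvm_set_pmt_entry(kvm, gfn, page_to_pfn(page) << PAGE_SHIFT,
+ _PAGE_AR_RWX | _PAGE_MA_WB);
+}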
+
+/* Function for translating host address to guest address */
+
+static inline void *to_guest(struct kvm *kvm, void *addr)
+{
+ return (void *)((unsigned long)(addr) - kvm->arch.vm_base +
+ KVM_VM_DATA_BASE);
+}
+
+/* Function for translating guest address to host address */
+
+static inline void *to_host(struct kvm *kvm, void *addr)
+{
+ return (void *)((unsigned long)addr - KVM_VM_DATA_BASE
+ + kvm->arch.vm_base);
+}
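+
+/*
+ * Illustrative check (hypothetical, not used by this patch): the two
+ * translations above are inverses for any address inside the per-VM
+ * data area.
+ */
+static inline void kvm_addr_roundtrip_check(struct kvm *kvm, void *host_addr)
+{
+ BUG_ON(to_host(kvm, to_guest(kvm, host_addr)) != host_addr);
+}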
+
+/* Get host context of the vcpu */
+static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu)
+{
+ union context *ctx = &vcpu->arch.host;
+ return to_guest(vcpu->kvm, ctx);
+}
+
+/* Get guest context of the vcpu */
+static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu)
+{
+ union context *ctx = &vcpu->arch.guest;
+ return to_guest(vcpu->kvm, ctx);
+}
+
+/* Get the exit data the gvmm recorded for this vcpu */
+static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
+{
+ return &vcpu->arch.exit_data;
+}
+
+/* Get the vcpu's pending ioreq for the kvm module */
+static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p_ctl_data;
+
+ if (vcpu) {
+ p_ctl_data = kvm_get_exit_data(vcpu);
+ if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION)
+ return &p_ctl_data->u.ioreq;
+ }
+
+ return NULL;
+}
+
+#endif
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
new file mode 100644
index 00000000000..351bf70da46
--- /dev/null
+++ b/arch/ia64/kvm/mmio.c
@@ -0,0 +1,341 @@
+/*
+ * mmio.c: MMIO emulation components.
+ * Copyright (c) 2004, Intel Corporation.
+ * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
+ * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
+ *
+ * Copyright (c) 2007 Intel Corporation KVM support.
+ * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/kvm_host.h>
+
+#include "vcpu.h"
+
+static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
+{
+ VLSAPIC_XTP(v) = val;
+}
+
+/*
+ * LSAPIC OFFSET
+ */
+#define PIB_LOW_HALF(ofst) !(ofst & (1 << 20))
+#define PIB_OFST_INTA 0x1E0000
+#define PIB_OFST_XTP 0x1E0008
+
+/*
+ * execute write IPI op.
+ */
+static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
+ uint64_t addr, uint64_t data)
+{
+ struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
+ unsigned long psr;
+
+ local_irq_save(psr);
+
+ p->exit_reason = EXIT_REASON_IPI;
+ p->u.ipi_data.addr.val = addr;
+ p->u.ipi_data.data.val = data;
+ vmm_transition(current_vcpu);
+
+ local_irq_restore(psr);
+}
+
+void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
+ unsigned long length, unsigned long val)
+{
+ addr &= (PIB_SIZE - 1);
+
+ switch (addr) {
+ case PIB_OFST_INTA:
+ /* undefined write on PIB INTA */
+ panic_vm(v);
+ break;
+ case PIB_OFST_XTP:
+ if (length == 1)
+ vlsapic_write_xtp(v, val);
+ else
+ /* undefined write on PIB XTP */
+ panic_vm(v);
+ break;
+ default:
+ if (PIB_LOW_HALF(addr)) {
+ /* lower half */
+ if (length != 8)
+ panic_vm(v);
+ else
+ vlsapic_write_ipi(v, addr, val);
+ } else {
+ /* upper half */
+ panic_vm(v);
+ }
+ break;
+ }
+}
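+
+/*
+ * Illustrative access (the value is arbitrary): a one-byte guest store
+ * to the XTP slot updates this vcpu's task-priority byte:
+ *
+ * lsapic_write(v, PIB_OFST_XTP, 1, 0x0f);
+ */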
+
+unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
+ unsigned long length)
+{
+ uint64_t result = 0;
+
+ addr &= (PIB_SIZE - 1);
+
+ switch (addr) {
+ case PIB_OFST_INTA:
+ /* There is no i8259, there is no INTA access */
+ if (length != 1)
+ panic_vm(v);
+ break;
+ case PIB_OFST_XTP:
+ if (length == 1)
+ result = VLSAPIC_XTP(v);
+ else
+ /* undefined read on PIB XTP */
+ panic_vm(v);
+ break;
+ default:
+ panic_vm(v);
+ break;
+ }
+ return result;
+}
+
+static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
+ u16 s, int ma, int dir)
+{
+ unsigned long iot;
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+ unsigned long psr;
+
+ iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);
+
+ local_irq_save(psr);
+
+ /* Intercept the access for the PIB range */
+ if (iot == GPFN_PIB) {
+ if (!dir)
+ lsapic_write(vcpu, src_pa, s, *dest);
+ else
+ *dest = lsapic_read(vcpu, src_pa, s);
+ goto out;
+ }
+ p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
+ p->u.ioreq.addr = src_pa;
+ p->u.ioreq.size = s;
+ p->u.ioreq.dir = dir;
+ if (dir == IOREQ_WRITE)
+ p->u.ioreq.data = *dest;
+ p->u.ioreq.state = STATE_IOREQ_READY;
+ vmm_transition(vcpu);
+
+ if (p->u.ioreq.state == STATE_IORESP_READY) {
+ if (dir == IOREQ_READ)
+ *dest = p->u.ioreq.data;
+ } else
+ panic_vm(vcpu);
+out:
+ local_irq_restore(psr);
+}
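+
+/*
+ * Illustrative wrapper (hypothetical): forward an 8-byte, non-PIB guest
+ * load to the userspace device model and return the data it supplied.
+ */
+static inline u64 mmio_read8_example(struct kvm_vcpu *vcpu, u64 pa, int ma)
+{
+ u64 val = 0;
+
+ mmio_access(vcpu, pa, &val, 8, ma, IOREQ_READ);
+ return val;
+}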
+
+/*
+ * dir: 1 - read, 0 - write
+ * inst_type: 0 - integer, 1 - floating point
+ */
+#define SL_INTEGER 0 /* store/load integer */
+#define SL_FLOATING 1 /* store/load floating point */
+
+void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
+{
+ struct kvm_pt_regs *regs;
+ IA64_BUNDLE bundle;
+ int slot, dir = 0;
+ int inst_type = -1;
+ u16 size = 0;
+ u64 data, slot1a, slot1b, temp, update_reg;
+ s32 imm;
+ INST64 inst;
+
+ regs = vcpu_regs(vcpu);
+
+ if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
+ /* if fetching the code fails, return and try again */
+ return;
+ }
+ slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
+ if (!slot)
+ inst.inst = bundle.slot0;
+ else if (slot == 1) {
+ slot1a = bundle.slot1a;
+ slot1b = bundle.slot1b;
+ inst.inst = slot1a + (slot1b << 18);
+ } else if (slot == 2)
+ inst.inst = bundle.slot2;
+
+ /* Integer Load/Store */
+ if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
+ inst_type = SL_INTEGER;
+ size = (inst.M1.x6 & 0x3);
+ if ((inst.M1.x6 >> 2) > 0xb) {
+ /*write*/
+ dir = IOREQ_WRITE;
+ data = vcpu_get_gr(vcpu, inst.M4.r2);
+ } else if ((inst.M1.x6 >> 2) < 0xb) {
+ /*read*/
+ dir = IOREQ_READ;
+ }
+ } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
+ /* Integer Load + Reg update */
+ inst_type = SL_INTEGER;
+ dir = IOREQ_READ;
+ size = (inst.M2.x6 & 0x3);
+ temp = vcpu_get_gr(vcpu, inst.M2.r3);
+ update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
+ temp += update_reg;
+ vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
+ } else if (inst.M3.major == 5) {
+ /*Integer Load/Store + Imm update*/
+ inst_type = SL_INTEGER;
+ size = (inst.M3.x6&0x3);
+ if ((inst.M5.x6 >> 2) > 0xb) {
+ /*write*/
+ dir = IOREQ_WRITE;
+ data = vcpu_get_gr(vcpu, inst.M5.r2);
+ temp = vcpu_get_gr(vcpu, inst.M5.r3);
+ imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
+ (inst.M5.imm7 << 23);
+ temp += imm >> 23;
+ vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
+
+ } else if ((inst.M3.x6 >> 2) < 0xb) {
+ /*read*/
+ dir = IOREQ_READ;
+ temp = vcpu_get_gr(vcpu, inst.M3.r3);
+ imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
+ (inst.M3.imm7 << 23);
+ temp += imm >> 23;
+ vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
+
+ }
+ } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
+ && inst.M9.m == 0 && inst.M9.x == 0) {
+ /* Floating-point spill*/
+ struct ia64_fpreg v;
+
+ inst_type = SL_FLOATING;
+ dir = IOREQ_WRITE;
+ vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
+ /* Write high word. FIXME: this is a kludge! */
+ v.u.bits[1] &= 0x3ffff;
+ mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+ data = v.u.bits[0];
+ size = 3;
+ } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
+ /* Floating-point spill + Imm update */
+ struct ia64_fpreg v;
+
+ inst_type = SL_FLOATING;
+ dir = IOREQ_WRITE;
+ vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
+ temp = vcpu_get_gr(vcpu, inst.M10.r3);
+ imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
+ (inst.M10.imm7 << 23);
+ temp += imm >> 23;
+ vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
+
+ /* Write high word. FIXME: this is a kludge! */
+ v.u.bits[1] &= 0x3ffff;
+ mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+ data = v.u.bits[0];
+ size = 3;
+ } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
+ /* Floating-point stf8 + Imm update */
+ struct ia64_fpreg v;
+ inst_type = SL_FLOATING;
+ dir = IOREQ_WRITE;
+ size = 3;
+ vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
+ data = v.u.bits[0]; /* Significand. */
+ temp = vcpu_get_gr(vcpu, inst.M10.r3);
+ imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
+ (inst.M10.imm7 << 23);
+ temp += imm >> 23;
+ vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
+ } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
+ && inst.M15.x6 <= 0x2f) {
+ temp = vcpu_get_gr(vcpu, inst.M15.r3);
+ imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
+ (inst.M15.imm7 << 23);
+ temp += imm >> 23;
+ vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
+
+ vcpu_increment_iip(vcpu);
+ return;
+ } else if (inst.M12.major == 6 && inst.M12.m == 1
+ && inst.M12.x == 1 && inst.M12.x6 == 1) {
+ /* Floating-point Load Pair + Imm ldfp8 M12 */
+ struct ia64_fpreg v;
+
+ inst_type = SL_FLOATING;
+ dir = IOREQ_READ;
+ size = 8; /*ldfd*/
+ mmio_access(vcpu, padr, &data, size, ma, dir);
+ v.u.bits[0] = data;
+ v.u.bits[1] = 0x1003E;
+ vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
+ padr += 8;
+ mmio_access(vcpu, padr, &data, size, ma, dir);
+ v.u.bits[0] = data;
+ v.u.bits[1] = 0x1003E;
+ vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
+ padr += 8;
+ vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
+ vcpu_increment_iip(vcpu);
+ return;
+ } else {
+ inst_type = -1;
+ panic_vm(vcpu);
+ }
+
+ size = 1 << size;
+ if (dir == IOREQ_WRITE) {
+ mmio_access(vcpu, padr, &data, size, ma, dir);
+ } else {
+ mmio_access(vcpu, padr, &data, size, ma, dir);
+ if (inst_type == SL_INTEGER)
+ vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
+ else
+ panic_vm(vcpu);
+
+ }
+ vcpu_increment_iip(vcpu);
+}
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
new file mode 100644
index 00000000000..e4f15d641b2
--- /dev/null
+++ b/arch/ia64/kvm/optvfault.S
@@ -0,0 +1,918 @@
+/*
+ * arch/ia64/kvm/optvfault.S
+ * optimize virtualization fault handler
+ *
+ * Copyright (C) 2006 Intel Co
+ * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+
+#include "vti.h"
+#include "asm-offsets.h"
+
+#define ACCE_MOV_FROM_AR
+#define ACCE_MOV_FROM_RR
+#define ACCE_MOV_TO_RR
+#define ACCE_RSM
+#define ACCE_SSM
+#define ACCE_MOV_TO_PSR
+#define ACCE_THASH
+
+//mov r1=ar3
+GLOBAL_ENTRY(kvm_asm_mov_from_ar)
+#ifndef ACCE_MOV_FROM_AR
+ br.many kvm_virtualization_fault_back
+#endif
+ add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
+ add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
+ extr.u r17=r25,6,7
+ ;;
+ ld8 r18=[r18]
+ mov r19=ar.itc
+ mov r24=b0
+ ;;
+ add r19=r19,r18
+ addl r20=@gprel(asm_mov_to_reg),gp
+ ;;
+ st8 [r16] = r19
+ adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
+ shladd r17=r17,4,r20
+ ;;
+ mov b0=r17
+ br.sptk.few b0
+ ;;
+END(kvm_asm_mov_from_ar)
+
+
+// mov r1=rr[r3]
+GLOBAL_ENTRY(kvm_asm_mov_from_rr)
+#ifndef ACCE_MOV_FROM_RR
+ br.many kvm_virtualization_fault_back
+#endif
+ extr.u r16=r25,20,7
+ extr.u r17=r25,6,7
+ addl r20=@gprel(asm_mov_from_reg),gp
+ ;;
+ adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
+ shladd r16=r16,4,r20
+ mov r24=b0
+ ;;
+ add r27=VMM_VCPU_VRR0_OFFSET,r21
+ mov b0=r16
+ br.many b0
+ ;;
+kvm_asm_mov_from_rr_back_1:
+ adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
+ adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
+ shr.u r26=r19,61
+ ;;
+ shladd r17=r17,4,r22
+ shladd r27=r26,3,r27
+ ;;
+ ld8 r19=[r27]
+ mov b0=r17
+ br.many b0
+END(kvm_asm_mov_from_rr)
+
+
+// mov rr[r3]=r2
+GLOBAL_ENTRY(kvm_asm_mov_to_rr)
+#ifndef ACCE_MOV_TO_RR
+ br.many kvm_virtualization_fault_back
+#endif
+ extr.u r16=r25,20,7
+ extr.u r17=r25,13,7
+ addl r20=@gprel(asm_mov_from_reg),gp
+ ;;
+ adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
+ shladd r16=r16,4,r20
+ mov r22=b0
+ ;;
+ add r27=VMM_VCPU_VRR0_OFFSET,r21
+ mov b0=r16
+ br.many b0
+ ;;
+kvm_asm_mov_to_rr_back_1:
+ adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
+ shr.u r23=r19,61
+ shladd r17=r17,4,r20
+ ;;
+ //if rr6, go back
+ cmp.eq p6,p0=6,r23
+ mov b0=r22
+ (p6) br.cond.dpnt.many kvm_virtualization_fault_back
+ ;;
+ mov r28=r19
+ mov b0=r17
+ br.many b0
+kvm_asm_mov_to_rr_back_2:
+ adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
+ shladd r27=r23,3,r27
+ ;; // vrr.rid<<4 |0xe
+ st8 [r27]=r19
+ mov b0=r30
+ ;;
+ extr.u r16=r19,8,26
+ extr.u r18 =r19,2,6
+ mov r17 =0xe
+ ;;
+ shladd r16 = r16, 4, r17
+ extr.u r19 =r19,0,8
+ ;;
+ shl r16 = r16,8
+ ;;
+ add r19 = r19, r16
+ ;; //set ve 1
+ dep r19=-1,r19,0,1
+ cmp.lt p6,p0=14,r18
+ ;;
+ (p6) mov r18=14
+ ;;
+ (p6) dep r19=r18,r19,2,6
+ ;;
+ cmp.eq p6,p0=0,r23
+ ;;
+ cmp.eq.or p6,p0=4,r23
+ ;;
+ adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+ (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
+ ;;
+ ld4 r16=[r16]
+ cmp.eq p7,p0=r0,r0
+ (p6) shladd r17=r23,1,r17
+ ;;
+ (p6) st8 [r17]=r19
+ (p6) tbit.nz p6,p7=r16,0
+ ;;
+ (p7) mov rr[r28]=r19
+ mov r24=r22
+ br.many b0
+END(kvm_asm_mov_to_rr)
+
+
+//rsm
+GLOBAL_ENTRY(kvm_asm_rsm)
+#ifndef ACCE_RSM
+ br.many kvm_virtualization_fault_back
+#endif
+ add r16=VMM_VPD_BASE_OFFSET,r21
+ extr.u r26=r25,6,21
+ extr.u r27=r25,31,2
+ ;;
+ ld8 r16=[r16]
+ extr.u r28=r25,36,1
+ dep r26=r27,r26,21,2
+ ;;
+ add r17=VPD_VPSR_START_OFFSET,r16
+ add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+ //r26 is imm24
+ dep r26=r28,r26,23,1
+ ;;
+ ld8 r18=[r17]
+ movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
+ ld4 r23=[r22]
+ sub r27=-1,r26
+ mov r24=b0
+ ;;
+ mov r20=cr.ipsr
+ or r28=r27,r28
+ and r19=r18,r27
+ ;;
+ st8 [r17]=r19
+ and r20=r20,r28
+ /* Commented out due to lack of fp lazy algorithm support
+ adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
+ ;;
+ ld8 r27=[r27]
+ ;;
+ tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
+ ;;
+ (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
+ */
+ ;;
+ mov cr.ipsr=r20
+ tbit.nz p6,p0=r23,0
+ ;;
+ tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
+ (p6) br.dptk kvm_resume_to_guest
+ ;;
+ add r26=VMM_VCPU_META_RR0_OFFSET,r21
+ add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
+ dep r23=-1,r23,0,1
+ ;;
+ ld8 r26=[r26]
+ ld8 r27=[r27]
+ st4 [r22]=r23
+ dep.z r28=4,61,3
+ ;;
+ mov rr[r0]=r26
+ ;;
+ mov rr[r28]=r27
+ ;;
+ srlz.d
+ br.many kvm_resume_to_guest
+END(kvm_asm_rsm)
+
+
+//ssm
+GLOBAL_ENTRY(kvm_asm_ssm)
+#ifndef ACCE_SSM
+ br.many kvm_virtualization_fault_back
+#endif
+ add r16=VMM_VPD_BASE_OFFSET,r21
+ extr.u r26=r25,6,21
+ extr.u r27=r25,31,2
+ ;;
+ ld8 r16=[r16]
+ extr.u r28=r25,36,1
+ dep r26=r27,r26,21,2
+ ;; //r26 is imm24
+ add r27=VPD_VPSR_START_OFFSET,r16
+ dep r26=r28,r26,23,1
+ ;; //r19 vpsr
+ ld8 r29=[r27]
+ mov r24=b0
+ ;;
+ add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+ mov r20=cr.ipsr
+ or r19=r29,r26
+ ;;
+ ld4 r23=[r22]
+ st8 [r27]=r19
+ or r20=r20,r26
+ ;;
+ mov cr.ipsr=r20
+ movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
+ ;;
+ and r19=r28,r19
+ tbit.z p6,p0=r23,0
+ ;;
+ cmp.ne.or p6,p0=r28,r19
+ (p6) br.dptk kvm_asm_ssm_1
+ ;;
+ add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
+ add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
+ dep r23=0,r23,0,1
+ ;;
+ ld8 r26=[r26]
+ ld8 r27=[r27]
+ st4 [r22]=r23
+ dep.z r28=4,61,3
+ ;;
+ mov rr[r0]=r26
+ ;;
+ mov rr[r28]=r27
+ ;;
+ srlz.d
+ ;;
+kvm_asm_ssm_1:
+ tbit.nz p6,p0=r29,IA64_PSR_I_BIT
+ ;;
+ tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
+ (p6) br.dptk kvm_resume_to_guest
+ ;;
+ add r29=VPD_VTPR_START_OFFSET,r16
+ add r30=VPD_VHPI_START_OFFSET,r16
+ ;;
+ ld8 r29=[r29]
+ ld8 r30=[r30]
+ ;;
+ extr.u r17=r29,4,4
+ extr.u r18=r29,16,1
+ ;;
+ dep r17=r18,r17,4,1
+ ;;
+ cmp.gt p6,p0=r30,r17
+ (p6) br.dpnt.few kvm_asm_dispatch_vexirq
+ br.many kvm_resume_to_guest
+END(kvm_asm_ssm)
+
+
+//mov psr.l=r2
+GLOBAL_ENTRY(kvm_asm_mov_to_psr)
+#ifndef ACCE_MOV_TO_PSR
+ br.many kvm_virtualization_fault_back
+#endif
+ add r16=VMM_VPD_BASE_OFFSET,r21
+ extr.u r26=r25,13,7 //r2
+ ;;
+ ld8 r16=[r16]
+ addl r20=@gprel(asm_mov_from_reg),gp
+ ;;
+ adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
+ shladd r26=r26,4,r20
+ mov r24=b0
+ ;;
+ add r27=VPD_VPSR_START_OFFSET,r16
+ mov b0=r26
+ br.many b0
+ ;;
+kvm_asm_mov_to_psr_back:
+ ld8 r17=[r27]
+ add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+ dep r19=0,r19,32,32
+ ;;
+ ld4 r23=[r22]
+ dep r18=0,r17,0,32
+ ;;
+ add r30=r18,r19
+ movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
+ ;;
+ st8 [r27]=r30
+ and r27=r28,r30
+ and r29=r28,r17
+ ;;
+ cmp.eq p5,p0=r29,r27
+ cmp.eq p6,p7=r28,r27
+ (p5) br.many kvm_asm_mov_to_psr_1
+ ;;
+ //virtual to physical
+ (p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
+ (p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
+ (p7) dep r23=-1,r23,0,1
+ ;;
+ //physical to virtual
+ (p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
+ (p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
+ (p6) dep r23=0,r23,0,1
+ ;;
+ ld8 r26=[r26]
+ ld8 r27=[r27]
+ st4 [r22]=r23
+ dep.z r28=4,61,3
+ ;;
+ mov rr[r0]=r26
+ ;;
+ mov rr[r28]=r27
+ ;;
+ srlz.d
+ ;;
+kvm_asm_mov_to_psr_1:
+ mov r20=cr.ipsr
+ movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
+ ;;
+ or r19=r19,r28
+ dep r20=0,r20,0,32
+ ;;
+ add r20=r19,r20
+ mov b0=r24
+ ;;
+ /* Commented out due to lack of fp lazy algorithm support
+ adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
+ ;;
+ ld8 r27=[r27]
+ ;;
+ tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
+ ;;
+ (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
+ ;;
+ */
+ mov cr.ipsr=r20
+ cmp.ne p6,p0=r0,r0
+ ;;
+ tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
+ tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
+ (p6) br.dpnt.few kvm_resume_to_guest
+ ;;
+ add r29=VPD_VTPR_START_OFFSET,r16
+ add r30=VPD_VHPI_START_OFFSET,r16
+ ;;
+ ld8 r29=[r29]
+ ld8 r30=[r30]
+ ;;
+ extr.u r17=r29,4,4
+ extr.u r18=r29,16,1
+ ;;
+ dep r17=r18,r17,4,1
+ ;;
+ cmp.gt p6,p0=r30,r17
+ (p6) br.dpnt.few kvm_asm_dispatch_vexirq
+ br.many kvm_resume_to_guest
+END(kvm_asm_mov_to_psr)
+
+
+ENTRY(kvm_asm_dispatch_vexirq)
+//increment iip
+ mov r16=cr.ipsr
+ ;;
+ extr.u r17=r16,IA64_PSR_RI_BIT,2
+ tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
+ ;;
+ (p6) mov r18=cr.iip
+ (p6) mov r17=r0
+ (p7) add r17=1,r17
+ ;;
+ (p6) add r18=0x10,r18
+ dep r16=r17,r16,IA64_PSR_RI_BIT,2
+ ;;
+ (p6) mov cr.iip=r18
+ mov cr.ipsr=r16
+ mov r30 =1
+ br.many kvm_dispatch_vexirq
+END(kvm_asm_dispatch_vexirq)
+
+// thash
+// TODO: add support when pta.vf = 1
+GLOBAL_ENTRY(kvm_asm_thash)
+#ifndef ACCE_THASH
+ br.many kvm_virtualization_fault_back
+#endif
+ extr.u r17=r25,20,7 // get r3 from opcode in r25
+ extr.u r18=r25,6,7 // get r1 from opcode in r25
+ addl r20=@gprel(asm_mov_from_reg),gp
+ ;;
+ adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
+ shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17)
+ adds r16=VMM_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs
+ ;;
+ mov r24=b0
+ ;;
+ ld8 r16=[r16] // get VPD addr
+ mov b0=r17
+ br.many b0 // r19 return value
+ ;;
+kvm_asm_thash_back1:
+ shr.u r23=r19,61 // get RR number
+ adds r25=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr
+ adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta
+ ;;
+ shladd r27=r23,3,r25 // get vcpu->arch.vrr[r23]'s addr
+ ld8 r17=[r16] // get PTA
+ mov r26=1
+ ;;
+ extr.u r29=r17,2,6 // get pta.size
+ ld8 r25=[r27] // get vcpu->arch.vrr[r23]'s value
+ ;;
+ extr.u r25=r25,2,6 // get rr.ps
+ shl r22=r26,r29 // 1UL << pta.size
+ ;;
+ shr.u r23=r19,r25 // vaddr >> rr.ps
+ adds r26=3,r29 // pta.size + 3
+ shl r27=r17,3 // pta << 3
+ ;;
+ shl r23=r23,3 // (vaddr >> rr.ps) << 3
+ shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3)
+ movl r16=7<<61
+ ;;
+ adds r22=-1,r22 // (1UL << pta.size) - 1
+ shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size
+ and r19=r19,r16 // vaddr & VRN_MASK
+ ;;
+ and r22=r22,r23 // vhpt_offset
+ or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
+ adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
+ ;;
+ or r19=r19,r22 // calc pval
+ shladd r17=r18,4,r26
+ adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
+ ;;
+ mov b0=r17
+ br.many b0
+END(kvm_asm_thash)
+
+#define MOV_TO_REG0 \
+{; \
+ nop.b 0x0; \
+ nop.b 0x0; \
+ nop.b 0x0; \
+ ;; \
+};
+
+
+#define MOV_TO_REG(n) \
+{; \
+ mov r##n##=r19; \
+ mov b0=r30; \
+ br.sptk.many b0; \
+ ;; \
+};
+
+
+#define MOV_FROM_REG(n) \
+{; \
+ mov r19=r##n##; \
+ mov b0=r30; \
+ br.sptk.many b0; \
+ ;; \
+};
+
+
+#define MOV_TO_BANK0_REG(n) \
+ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
+{; \
+ mov r26=r2; \
+ mov r2=r19; \
+ bsw.1; \
+ ;; \
+}; \
+{; \
+ mov r##n##=r2; \
+ nop.b 0x0; \
+ bsw.0; \
+ ;; \
+}; \
+{; \
+ mov r2=r26; \
+ mov b0=r30; \
+ br.sptk.many b0; \
+ ;; \
+}; \
+END(asm_mov_to_bank0_reg##n##)
+
+
+#define MOV_FROM_BANK0_REG(n) \
+ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
+{; \
+ mov r26=r2; \
+ nop.b 0x0; \
+ bsw.1; \
+ ;; \
+}; \
+{; \
+ mov r2=r##n##; \
+ nop.b 0x0; \
+ bsw.0; \
+ ;; \
+}; \
+{; \
+ mov r19=r2; \
+ mov r2=r26; \
+ mov b0=r30; \
+}; \
+{; \
+ nop.b 0x0; \
+ nop.b 0x0; \
+ br.sptk.many b0; \
+ ;; \
+}; \
+END(asm_mov_from_bank0_reg##n##)
+
+
+#define JMP_TO_MOV_TO_BANK0_REG(n) \
+{; \
+ nop.b 0x0; \
+ nop.b 0x0; \
+ br.sptk.many asm_mov_to_bank0_reg##n##; \
+ ;; \
+}
+
+
+#define JMP_TO_MOV_FROM_BANK0_REG(n) \
+{; \
+ nop.b 0x0; \
+ nop.b 0x0; \
+ br.sptk.many asm_mov_from_bank0_reg##n##; \
+ ;; \
+}
+
+
+MOV_FROM_BANK0_REG(16)
+MOV_FROM_BANK0_REG(17)
+MOV_FROM_BANK0_REG(18)
+MOV_FROM_BANK0_REG(19)
+MOV_FROM_BANK0_REG(20)
+MOV_FROM_BANK0_REG(21)
+MOV_FROM_BANK0_REG(22)
+MOV_FROM_BANK0_REG(23)
+MOV_FROM_BANK0_REG(24)
+MOV_FROM_BANK0_REG(25)
+MOV_FROM_BANK0_REG(26)
+MOV_FROM_BANK0_REG(27)
+MOV_FROM_BANK0_REG(28)
+MOV_FROM_BANK0_REG(29)
+MOV_FROM_BANK0_REG(30)
+MOV_FROM_BANK0_REG(31)
+
+
+// mov from reg table
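+// Each MOV_FROM_REG(n) expansion below is a single 16-byte bundle, so the
+// dispatchers index this table with "shladd reg = regnum, 4, table_base"
+// (i.e. regnum * 16 + base) and branch to the selected entry.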
+ENTRY(asm_mov_from_reg)
+ MOV_FROM_REG(0)
+ MOV_FROM_REG(1)
+ MOV_FROM_REG(2)
+ MOV_FROM_REG(3)
+ MOV_FROM_REG(4)
+ MOV_FROM_REG(5)
+ MOV_FROM_REG(6)
+ MOV_FROM_REG(7)
+ MOV_FROM_REG(8)
+ MOV_FROM_REG(9)
+ MOV_FROM_REG(10)
+ MOV_FROM_REG(11)
+ MOV_FROM_REG(12)
+ MOV_FROM_REG(13)
+ MOV_FROM_REG(14)
+ MOV_FROM_REG(15)
+ JMP_TO_MOV_FROM_BANK0_REG(16)
+ JMP_TO_MOV_FROM_BANK0_REG(17)
+ JMP_TO_MOV_FROM_BANK0_REG(18)
+ JMP_TO_MOV_FROM_BANK0_REG(19)
+ JMP_TO_MOV_FROM_BANK0_REG(20)
+ JMP_TO_MOV_FROM_BANK0_REG(21)
+ JMP_TO_MOV_FROM_BANK0_REG(22)
+ JMP_TO_MOV_FROM_BANK0_REG(23)
+ JMP_TO_MOV_FROM_BANK0_REG(24)
+ JMP_TO_MOV_FROM_BANK0_REG(25)
+ JMP_TO_MOV_FROM_BANK0_REG(26)
+ JMP_TO_MOV_FROM_BANK0_REG(27)
+ JMP_TO_MOV_FROM_BANK0_REG(28)
+ JMP_TO_MOV_FROM_BANK0_REG(29)
+ JMP_TO_MOV_FROM_BANK0_REG(30)
+ JMP_TO_MOV_FROM_BANK0_REG(31)
+ MOV_FROM_REG(32)
+ MOV_FROM_REG(33)
+ MOV_FROM_REG(34)
+ MOV_FROM_REG(35)
+ MOV_FROM_REG(36)
+ MOV_FROM_REG(37)
+ MOV_FROM_REG(38)
+ MOV_FROM_REG(39)
+ MOV_FROM_REG(40)
+ MOV_FROM_REG(41)
+ MOV_FROM_REG(42)
+ MOV_FROM_REG(43)
+ MOV_FROM_REG(44)
+ MOV_FROM_REG(45)
+ MOV_FROM_REG(46)
+ MOV_FROM_REG(47)
+ MOV_FROM_REG(48)
+ MOV_FROM_REG(49)
+ MOV_FROM_REG(50)
+ MOV_FROM_REG(51)
+ MOV_FROM_REG(52)
+ MOV_FROM_REG(53)
+ MOV_FROM_REG(54)
+ MOV_FROM_REG(55)
+ MOV_FROM_REG(56)
+ MOV_FROM_REG(57)
+ MOV_FROM_REG(58)
+ MOV_FROM_REG(59)
+ MOV_FROM_REG(60)
+ MOV_FROM_REG(61)
+ MOV_FROM_REG(62)
+ MOV_FROM_REG(63)
+ MOV_FROM_REG(64)
+ MOV_FROM_REG(65)
+ MOV_FROM_REG(66)
+ MOV_FROM_REG(67)
+ MOV_FROM_REG(68)
+ MOV_FROM_REG(69)
+ MOV_FROM_REG(70)
+ MOV_FROM_REG(71)
+ MOV_FROM_REG(72)
+ MOV_FROM_REG(73)
+ MOV_FROM_REG(74)
+ MOV_FROM_REG(75)
+ MOV_FROM_REG(76)
+ MOV_FROM_REG(77)
+ MOV_FROM_REG(78)
+ MOV_FROM_REG(79)
+ MOV_FROM_REG(80)
+ MOV_FROM_REG(81)
+ MOV_FROM_REG(82)
+ MOV_FROM_REG(83)
+ MOV_FROM_REG(84)
+ MOV_FROM_REG(85)
+ MOV_FROM_REG(86)
+ MOV_FROM_REG(87)
+ MOV_FROM_REG(88)
+ MOV_FROM_REG(89)
+ MOV_FROM_REG(90)
+ MOV_FROM_REG(91)
+ MOV_FROM_REG(92)
+ MOV_FROM_REG(93)
+ MOV_FROM_REG(94)
+ MOV_FROM_REG(95)
+ MOV_FROM_REG(96)
+ MOV_FROM_REG(97)
+ MOV_FROM_REG(98)
+ MOV_FROM_REG(99)
+ MOV_FROM_REG(100)
+ MOV_FROM_REG(101)
+ MOV_FROM_REG(102)
+ MOV_FROM_REG(103)
+ MOV_FROM_REG(104)
+ MOV_FROM_REG(105)
+ MOV_FROM_REG(106)
+ MOV_FROM_REG(107)
+ MOV_FROM_REG(108)
+ MOV_FROM_REG(109)
+ MOV_FROM_REG(110)
+ MOV_FROM_REG(111)
+ MOV_FROM_REG(112)
+ MOV_FROM_REG(113)
+ MOV_FROM_REG(114)
+ MOV_FROM_REG(115)
+ MOV_FROM_REG(116)
+ MOV_FROM_REG(117)
+ MOV_FROM_REG(118)
+ MOV_FROM_REG(119)
+ MOV_FROM_REG(120)
+ MOV_FROM_REG(121)
+ MOV_FROM_REG(122)
+ MOV_FROM_REG(123)
+ MOV_FROM_REG(124)
+ MOV_FROM_REG(125)
+ MOV_FROM_REG(126)
+ MOV_FROM_REG(127)
+END(asm_mov_from_reg)
+
+
+/* must be in bank 0
+ * parameter:
+ * r31: pr
+ * r24: b0
+ */
+ENTRY(kvm_resume_to_guest)
+ adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
+ ;;
+ ld8 r1 =[r16]
+ adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
+ ;;
+ mov r16=cr.ipsr
+ ;;
+ ld8 r20 = [r20]
+ adds r19=VMM_VPD_BASE_OFFSET,r21
+ ;;
+ ld8 r25=[r19]
+ extr.u r17=r16,IA64_PSR_RI_BIT,2
+ tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
+ ;;
+ (p6) mov r18=cr.iip
+ (p6) mov r17=r0
+ ;;
+ (p6) add r18=0x10,r18
+ (p7) add r17=1,r17
+ ;;
+ (p6) mov cr.iip=r18
+ dep r16=r17,r16,IA64_PSR_RI_BIT,2
+ ;;
+ mov cr.ipsr=r16
+ adds r19= VPD_VPSR_START_OFFSET,r25
+ add r28=PAL_VPS_RESUME_NORMAL,r20
+ add r29=PAL_VPS_RESUME_HANDLER,r20
+ ;;
+ ld8 r19=[r19]
+ mov b0=r29
+ cmp.ne p6,p7 = r0,r0
+ ;;
+ tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p7 = vpsr.ic
+ ;;
+ (p6) ld8 r26=[r25]
+ (p7) mov b0=r28
+ mov pr=r31,-2
+ br.sptk.many b0 // call pal service
+ ;;
+END(kvm_resume_to_guest)
+
+
+MOV_TO_BANK0_REG(16)
+MOV_TO_BANK0_REG(17)
+MOV_TO_BANK0_REG(18)
+MOV_TO_BANK0_REG(19)
+MOV_TO_BANK0_REG(20)
+MOV_TO_BANK0_REG(21)
+MOV_TO_BANK0_REG(22)
+MOV_TO_BANK0_REG(23)
+MOV_TO_BANK0_REG(24)
+MOV_TO_BANK0_REG(25)
+MOV_TO_BANK0_REG(26)
+MOV_TO_BANK0_REG(27)
+MOV_TO_BANK0_REG(28)
+MOV_TO_BANK0_REG(29)
+MOV_TO_BANK0_REG(30)
+MOV_TO_BANK0_REG(31)
+
+
+// mov to reg table
+ENTRY(asm_mov_to_reg)
+ MOV_TO_REG0
+ MOV_TO_REG(1)
+ MOV_TO_REG(2)
+ MOV_TO_REG(3)
+ MOV_TO_REG(4)
+ MOV_TO_REG(5)
+ MOV_TO_REG(6)
+ MOV_TO_REG(7)
+ MOV_TO_REG(8)
+ MOV_TO_REG(9)
+ MOV_TO_REG(10)
+ MOV_TO_REG(11)
+ MOV_TO_REG(12)
+ MOV_TO_REG(13)
+ MOV_TO_REG(14)
+ MOV_TO_REG(15)
+ JMP_TO_MOV_TO_BANK0_REG(16)
+ JMP_TO_MOV_TO_BANK0_REG(17)
+ JMP_TO_MOV_TO_BANK0_REG(18)
+ JMP_TO_MOV_TO_BANK0_REG(19)
+ JMP_TO_MOV_TO_BANK0_REG(20)
+ JMP_TO_MOV_TO_BANK0_REG(21)
+ JMP_TO_MOV_TO_BANK0_REG(22)
+ JMP_TO_MOV_TO_BANK0_REG(23)
+ JMP_TO_MOV_TO_BANK0_REG(24)
+ JMP_TO_MOV_TO_BANK0_REG(25)
+ JMP_TO_MOV_TO_BANK0_REG(26)
+ JMP_TO_MOV_TO_BANK0_REG(27)
+ JMP_TO_MOV_TO_BANK0_REG(28)
+ JMP_TO_MOV_TO_BANK0_REG(29)
+ JMP_TO_MOV_TO_BANK0_REG(30)
+ JMP_TO_MOV_TO_BANK0_REG(31)
+ MOV_TO_REG(32)
+ MOV_TO_REG(33)
+ MOV_TO_REG(34)
+ MOV_TO_REG(35)
+ MOV_TO_REG(36)
+ MOV_TO_REG(37)
+ MOV_TO_REG(38)
+ MOV_TO_REG(39)
+ MOV_TO_REG(40)
+ MOV_TO_REG(41)
+ MOV_TO_REG(42)
+ MOV_TO_REG(43)
+ MOV_TO_REG(44)
+ MOV_TO_REG(45)
+ MOV_TO_REG(46)
+ MOV_TO_REG(47)
+ MOV_TO_REG(48)
+ MOV_TO_REG(49)
+ MOV_TO_REG(50)
+ MOV_TO_REG(51)
+ MOV_TO_REG(52)
+ MOV_TO_REG(53)
+ MOV_TO_REG(54)
+ MOV_TO_REG(55)
+ MOV_TO_REG(56)
+ MOV_TO_REG(57)
+ MOV_TO_REG(58)
+ MOV_TO_REG(59)
+ MOV_TO_REG(60)
+ MOV_TO_REG(61)
+ MOV_TO_REG(62)
+ MOV_TO_REG(63)
+ MOV_TO_REG(64)
+ MOV_TO_REG(65)
+ MOV_TO_REG(66)
+ MOV_TO_REG(67)
+ MOV_TO_REG(68)
+ MOV_TO_REG(69)
+ MOV_TO_REG(70)
+ MOV_TO_REG(71)
+ MOV_TO_REG(72)
+ MOV_TO_REG(73)
+ MOV_TO_REG(74)
+ MOV_TO_REG(75)
+ MOV_TO_REG(76)
+ MOV_TO_REG(77)
+ MOV_TO_REG(78)
+ MOV_TO_REG(79)
+ MOV_TO_REG(80)
+ MOV_TO_REG(81)
+ MOV_TO_REG(82)
+ MOV_TO_REG(83)
+ MOV_TO_REG(84)
+ MOV_TO_REG(85)
+ MOV_TO_REG(86)
+ MOV_TO_REG(87)
+ MOV_TO_REG(88)
+ MOV_TO_REG(89)
+ MOV_TO_REG(90)
+ MOV_TO_REG(91)
+ MOV_TO_REG(92)
+ MOV_TO_REG(93)
+ MOV_TO_REG(94)
+ MOV_TO_REG(95)
+ MOV_TO_REG(96)
+ MOV_TO_REG(97)
+ MOV_TO_REG(98)
+ MOV_TO_REG(99)
+ MOV_TO_REG(100)
+ MOV_TO_REG(101)
+ MOV_TO_REG(102)
+ MOV_TO_REG(103)
+ MOV_TO_REG(104)
+ MOV_TO_REG(105)
+ MOV_TO_REG(106)
+ MOV_TO_REG(107)
+ MOV_TO_REG(108)
+ MOV_TO_REG(109)
+ MOV_TO_REG(110)
+ MOV_TO_REG(111)
+ MOV_TO_REG(112)
+ MOV_TO_REG(113)
+ MOV_TO_REG(114)
+ MOV_TO_REG(115)
+ MOV_TO_REG(116)
+ MOV_TO_REG(117)
+ MOV_TO_REG(118)
+ MOV_TO_REG(119)
+ MOV_TO_REG(120)
+ MOV_TO_REG(121)
+ MOV_TO_REG(122)
+ MOV_TO_REG(123)
+ MOV_TO_REG(124)
+ MOV_TO_REG(125)
+ MOV_TO_REG(126)
+ MOV_TO_REG(127)
+END(asm_mov_to_reg)
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
new file mode 100644
index 00000000000..5a33f7ed29a
--- /dev/null
+++ b/arch/ia64/kvm/process.c
@@ -0,0 +1,970 @@
+/*
+ * process.c: handle interruption injection for guests.
+ * Copyright (c) 2005, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Shaofan Li (Susie Li) <susie.li@intel.com>
+ * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ */
+#include "vcpu.h"
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/fpswa.h>
+#include <asm/kregs.h>
+#include <asm/tlb.h>
+
+fpswa_interface_t *vmm_fpswa_interface;
+
+#define IA64_VHPT_TRANS_VECTOR 0x0000
+#define IA64_INST_TLB_VECTOR 0x0400
+#define IA64_DATA_TLB_VECTOR 0x0800
+#define IA64_ALT_INST_TLB_VECTOR 0x0c00
+#define IA64_ALT_DATA_TLB_VECTOR 0x1000
+#define IA64_DATA_NESTED_TLB_VECTOR 0x1400
+#define IA64_INST_KEY_MISS_VECTOR 0x1800
+#define IA64_DATA_KEY_MISS_VECTOR 0x1c00
+#define IA64_DIRTY_BIT_VECTOR 0x2000
+#define IA64_INST_ACCESS_BIT_VECTOR 0x2400
+#define IA64_DATA_ACCESS_BIT_VECTOR 0x2800
+#define IA64_BREAK_VECTOR 0x2c00
+#define IA64_EXTINT_VECTOR 0x3000
+#define IA64_PAGE_NOT_PRESENT_VECTOR 0x5000
+#define IA64_KEY_PERMISSION_VECTOR 0x5100
+#define IA64_INST_ACCESS_RIGHTS_VECTOR 0x5200
+#define IA64_DATA_ACCESS_RIGHTS_VECTOR 0x5300
+#define IA64_GENEX_VECTOR 0x5400
+#define IA64_DISABLED_FPREG_VECTOR 0x5500
+#define IA64_NAT_CONSUMPTION_VECTOR 0x5600
+#define IA64_SPECULATION_VECTOR 0x5700 /* UNUSED */
+#define IA64_DEBUG_VECTOR 0x5900
+#define IA64_UNALIGNED_REF_VECTOR 0x5a00
+#define IA64_UNSUPPORTED_DATA_REF_VECTOR 0x5b00
+#define IA64_FP_FAULT_VECTOR 0x5c00
+#define IA64_FP_TRAP_VECTOR 0x5d00
+#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR 0x5e00
+#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00
+#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000
+
+/* SDM vol2 5.5 - IVA based interruption handling */
+#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\
+ IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \
+ IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT)
+
+#define DOMN_PAL_REQUEST 0x110000
+#define DOMN_SAL_REQUEST 0x110001
+
+static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
+ 0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
+ 0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
+ 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
+ 0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
+ 0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
+ 0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
+ 0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
+};
+
+static void collect_interruption(struct kvm_vcpu *vcpu)
+{
+ u64 ipsr;
+ u64 vdcr;
+ u64 vifs;
+ unsigned long vpsr;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ vpsr = vcpu_get_psr(vcpu);
+ vcpu_bsw0(vcpu);
+ if (vpsr & IA64_PSR_IC) {
+
+ /* Sync mpsr id/da/dd/ss/ed bits to vipsr,
+ * since after the guest does rfi, we still want these bits on
+ * in mpsr
+ */
+
+ ipsr = regs->cr_ipsr;
+ vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
+ | IA64_PSR_DD | IA64_PSR_SS
+ | IA64_PSR_ED));
+ vcpu_set_ipsr(vcpu, vpsr);
+
+ /* Currently, for a trap, we do not advance IIP to the next
+ * instruction. That's because we assume the caller has
+ * already set up IIP correctly
+ */
+
+ vcpu_set_iip(vcpu, regs->cr_iip);
+
+ /* set vifs.v to zero */
+ vifs = VCPU(vcpu, ifs);
+ vifs &= ~IA64_IFS_V;
+ vcpu_set_ifs(vcpu, vifs);
+
+ vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
+ }
+
+ vdcr = VCPU(vcpu, dcr);
+
+ /* Set guest psr
+ * up/mfl/mfh/pk/dt/rt/mc/it keeps unchanged
+ * be: set to the value of dcr.be
+ * pp: set to the value of dcr.pp
+ */
+ vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
+ vpsr |= (vdcr & IA64_DCR_BE);
+
+ /* VDCR pp bit position is different from VPSR pp bit */
+ if (vdcr & IA64_DCR_PP)
+ vpsr |= IA64_PSR_PP;
+ else
+ vpsr &= ~IA64_PSR_PP;
+
+ vcpu_set_psr(vcpu, vpsr);
+}
+
+void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
+{
+ u64 viva;
+ struct kvm_pt_regs *regs;
+ union ia64_isr pt_isr;
+
+ regs = vcpu_regs(vcpu);
+
+ /* clear cr.isr.ir (incomplete register frame)*/
+ pt_isr.val = VMX(vcpu, cr_isr);
+ pt_isr.ir = 0;
+ VMX(vcpu, cr_isr) = pt_isr.val;
+
+ collect_interruption(vcpu);
+
+ viva = vcpu_get_iva(vcpu);
+ regs->cr_iip = viva + vec;
+}
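+
+/*
+ * Worked example: for a guest data TLB miss on vadr, dtlb_fault() below
+ * records IFA/ITIR/IHA and then calls
+ * inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR), resuming the
+ * guest at its IVA + 0x0800 handler with the collected vPSR state.
+ */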
+
+static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
+{
+ union ia64_rr rr, rr1;
+
+ rr.val = vcpu_get_rr(vcpu, ifa);
+ rr1.val = 0;
+ rr1.ps = rr.ps;
+ rr1.rid = rr.rid;
+ return rr1.val;
+}
+
+
+/*
+ * Set vIFA & vITIR & vIHA, when vPSR.ic =1
+ * Parameter:
+ * set_ifa: if true, set vIFA
+ * set_itir: if true, set vITIR
+ * set_iha: if true, set vIHA
+ */
+void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
+ int set_ifa, int set_itir, int set_iha)
+{
+ long vpsr;
+ u64 value;
+
+ vpsr = VCPU(vcpu, vpsr);
+ /* Vol2, Table 8-1 */
+ if (vpsr & IA64_PSR_IC) {
+ if (set_ifa)
+ vcpu_set_ifa(vcpu, vadr);
+ if (set_itir) {
+ value = vcpu_get_itir_on_fault(vcpu, vadr);
+ vcpu_set_itir(vcpu, value);
+ }
+
+ if (set_iha) {
+ value = vcpu_thash(vcpu, vadr);
+ vcpu_set_iha(vcpu, value);
+ }
+ }
+}
+
+/*
+ * Data TLB Fault
+ * @ Data TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ /* If vPSR.ic, IFA, ITIR, IHA */
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
+ inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
+}
+
+/*
+ * Instruction TLB Fault
+ * @ Instruction TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ /* If vPSR.ic, IFA, ITIR, IHA */
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
+ inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
+}
+
+
+
+/*
+ * Data Nested TLB Fault
+ * @ Data Nested TLB Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void nested_dtlb(struct kvm_vcpu *vcpu)
+{
+ inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
+}
+
+/*
+ * Alternate Data TLB Fault
+ * @ Alternate Data TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+ inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
+}
+
+
+/*
+ * Alternate Instruction TLB Fault
+ * @ Alternate Instruction TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+ inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
+}
+
+/* Deal with:
+ * VHPT Translation Vector
+ */
+static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ /* If vPSR.ic, IFA, ITIR, IHA*/
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
+ inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
+}
+
+/*
+ * VHPT Instruction Fault
+ * @ VHPT Translation vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ _vhpt_fault(vcpu, vadr);
+}
+
+
+/*
+ * VHPT Data Fault
+ * @ VHPT Translation vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ _vhpt_fault(vcpu, vadr);
+}
+
+
+
+/*
+ * Deal with:
+ * General Exception vector
+ */
+void _general_exception(struct kvm_vcpu *vcpu)
+{
+ inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
+}
+
+
+/*
+ * Illegal Operation Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void illegal_op(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+
+/*
+ * Illegal Dependency Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void illegal_dep(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+
+/*
+ * Reserved Register/Field Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void rsv_reg_field(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+/*
+ * Privileged Operation Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+
+void privilege_op(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+
+/*
+ * Unimplemented Data Address Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void unimpl_daddr(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+
+/*
+ * Privileged Register Fault
+ * @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void privilege_reg(struct kvm_vcpu *vcpu)
+{
+ _general_exception(vcpu);
+}
+
+/* Deal with
+ * Nat consumption vector
+ * Parameter:
+ *   vadr: faulting address (ignored when t == REGISTER)
+ */
+static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
+ enum tlb_miss_type t)
+{
+ /* If vPSR.ic && t == DATA/INST, IFA */
+ if (t == DATA || t == INSTRUCTION) {
+ /* IFA */
+ set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
+ }
+
+ inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
+}
+
+/*
+ * Instruction Nat Page Consumption Fault
+ * @ Nat Consumption Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
+}
+
+/*
+ * Register Nat Consumption Fault
+ * @ Nat Consumption Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void rnat_consumption(struct kvm_vcpu *vcpu)
+{
+ _nat_consumption_fault(vcpu, 0, REGISTER);
+}
+
+/*
+ * Data Nat Page Consumption Fault
+ * @ Nat Consumption Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ _nat_consumption_fault(vcpu, vadr, DATA);
+}
+
+/* Deal with
+ * Page not present vector
+ */
+static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ /* If vPSR.ic, IFA, ITIR */
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+ inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
+}
+
+
+void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ __page_not_present(vcpu, vadr);
+}
+
+
+void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ __page_not_present(vcpu, vadr);
+}
+
+
+/* Deal with
+ * Data access rights vector
+ */
+void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ /* If vPSR.ic, IFA, ITIR */
+ set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+ inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
+}
+
+fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
+ unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
+ unsigned long *ifs, struct kvm_pt_regs *regs)
+{
+ fp_state_t fp_state;
+ fpswa_ret_t ret;
+ struct kvm_vcpu *vcpu = current_vcpu;
+
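+	/*
+	 * rr7 is switched to the host's value around the fpswa call
+	 * below, presumably because the host fpswa code is reached
+	 * through the host's region 7 mapping.
+	 */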
+ uint64_t old_rr7 = ia64_get_rr(7UL<<61);
+
+ if (!vmm_fpswa_interface)
+ return (fpswa_ret_t) {-1, 0, 0, 0};
+
+	/*
+	 * Just let the fpswa driver use the hardware fp registers
+	 * directly; no fp register state is kept in memory.
+	 */
+ memset(&fp_state, 0, sizeof(fp_state_t));
+
+ /*
+ * unsigned long (*EFI_FPSWA) (
+ * unsigned long trap_type,
+ * void *Bundle,
+ * unsigned long *pipsr,
+ * unsigned long *pfsr,
+ * unsigned long *pisr,
+ * unsigned long *ppreds,
+ * unsigned long *pifs,
+ * void *fp_state);
+ */
+	/*
+	 * Call the host fpswa interface directly to service the
+	 * guest's fpswa request.
+	 */
+ ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
+ ia64_srlz_d();
+
+ ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
+ ipsr, fpsr, isr, pr, ifs, &fp_state);
+ ia64_set_rr(7UL << 61, old_rr7);
+ ia64_srlz_d();
+ return ret;
+}
+
+/*
+ * Handle floating-point assist faults and traps for domain.
+ */
+unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs,
+ unsigned long isr)
+{
+ struct kvm_vcpu *v = current_vcpu;
+ IA64_BUNDLE bundle;
+ unsigned long fault_ip;
+ fpswa_ret_t ret;
+
+ fault_ip = regs->cr_iip;
+	/*
+	 * When an FP trap occurs, the trapping instruction has already
+	 * completed.  If ipsr.ri == 0, the trapping instruction is in
+	 * the previous bundle.
+	 */
+ if (!fp_fault && (ia64_psr(regs)->ri == 0))
+ fault_ip -= 16;
+
+ if (fetch_code(v, fault_ip, &bundle))
+ return -EAGAIN;
+
+ if (!bundle.i64[0] && !bundle.i64[1])
+ return -EACCES;
+
+ ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
+ &isr, &regs->pr, &regs->cr_ifs, regs);
+ return ret.status;
+}
+
+void reflect_interruption(u64 ifa, u64 isr, u64 iim,
+ u64 vec, struct kvm_pt_regs *regs)
+{
+ u64 vector;
+	int status;
+ struct kvm_vcpu *vcpu = current_vcpu;
+ u64 vpsr = VCPU(vcpu, vpsr);
+
+ vector = vec2off[vec];
+
+ if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
+ panic_vm(vcpu);
+ return;
+ }
+
+ switch (vec) {
+	case 32:	/* IA64_FP_FAULT_VECTOR */
+		status = vmm_handle_fpu_swa(1, regs, isr);
+		if (!status) {
+			vcpu_increment_iip(vcpu);
+			return;
+		} else if (status == -EAGAIN)
+			return;
+		break;
+	case 33:	/* IA64_FP_TRAP_VECTOR */
+		status = vmm_handle_fpu_swa(0, regs, isr);
+		if (!status)
+			return;
+		else if (status == -EAGAIN) {
+			vcpu_decrement_iip(vcpu);
+			return;
+		}
+		break;
+ }
+
+ VCPU(vcpu, isr) = isr;
+ VCPU(vcpu, iipa) = regs->cr_iip;
+ if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
+ VCPU(vcpu, iim) = iim;
+ else
+ set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
+
+ inject_guest_interruption(vcpu, vector);
+}
+
+static void set_pal_call_data(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+	/* FIXME: for both the static and the stacked conventions, the
+	 * firmware has already put the parameters in gr28-gr31 before
+	 * breaking into the vmm!
+	 */
+
+ p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
+ p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
+ p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+ p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
+ p->exit_reason = EXIT_REASON_PAL_CALL;
+}
+
+static void set_pal_call_result(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+ if (p->exit_reason == EXIT_REASON_PAL_CALL) {
+ vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0);
+ vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0);
+ vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
+ vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
+ } else
+ panic_vm(vcpu);
+}
+
+static void set_sal_call_data(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+ p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32);
+ p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33);
+ p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34);
+ p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35);
+ p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36);
+ p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37);
+ p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38);
+ p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39);
+ p->exit_reason = EXIT_REASON_SAL_CALL;
+}
+
+static void set_sal_call_result(struct kvm_vcpu *vcpu)
+{
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+ if (p->exit_reason == EXIT_REASON_SAL_CALL) {
+ vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0);
+ vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0);
+ vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
+ vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
+ } else
+ panic_vm(vcpu);
+}
+
+void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
+ unsigned long isr, unsigned long iim)
+{
+ struct kvm_vcpu *v = current_vcpu;
+
+ if (ia64_psr(regs)->cpl == 0) {
+ /* Allow hypercalls only when cpl = 0. */
+ if (iim == DOMN_PAL_REQUEST) {
+ set_pal_call_data(v);
+ vmm_transition(v);
+ set_pal_call_result(v);
+ vcpu_increment_iip(v);
+ return;
+ } else if (iim == DOMN_SAL_REQUEST) {
+ set_sal_call_data(v);
+ vmm_transition(v);
+ set_sal_call_result(v);
+ vcpu_increment_iip(v);
+ return;
+ }
+ }
+	reflect_interruption(ifa, isr, iim, 11, regs);	/* break fault */
+}
+
+void check_pending_irq(struct kvm_vcpu *vcpu)
+{
+ int mask, h_pending, h_inservice;
+ u64 isr;
+ unsigned long vpsr;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ h_pending = highest_pending_irq(vcpu);
+ if (h_pending == NULL_VECTOR) {
+ update_vhpi(vcpu, NULL_VECTOR);
+ return;
+ }
+ h_inservice = highest_inservice_irq(vcpu);
+
+ vpsr = VCPU(vcpu, vpsr);
+ mask = irq_masked(vcpu, h_pending, h_inservice);
+	if ((vpsr & IA64_PSR_I) && mask == IRQ_NO_MASKED) {
+ isr = vpsr & IA64_PSR_RI;
+ update_vhpi(vcpu, h_pending);
+ reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
+ } else if (mask == IRQ_MASKED_BY_INSVC) {
+ if (VCPU(vcpu, vhpi))
+ update_vhpi(vcpu, NULL_VECTOR);
+ } else {
+		/* masked by vpsr.i or vtpr */
+ update_vhpi(vcpu, h_pending);
+ }
+}
+
+static void generate_exirq(struct kvm_vcpu *vcpu)
+{
+	unsigned long vpsr;
+ uint64_t isr;
+
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ vpsr = VCPU(vcpu, vpsr);
+ isr = vpsr & IA64_PSR_RI;
+ if (!(vpsr & IA64_PSR_IC))
+ panic_vm(vcpu);
+ reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
+}
+
+void vhpi_detection(struct kvm_vcpu *vcpu)
+{
+ uint64_t threshold, vhpi;
+ union ia64_tpr vtpr;
+ struct ia64_psr vpsr;
+
+ vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+ vtpr.val = VCPU(vcpu, tpr);
+
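+	/*
+	 * Build the masking threshold: a cleared psr.i masks everything
+	 * (bit 5), tpr.mmi masks all external interrupts (bit 4), and
+	 * tpr.mic masks priority classes at or below it.  An interrupt
+	 * is delivered only when its vhpi value exceeds this threshold.
+	 */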
+ threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
+ vhpi = VCPU(vcpu, vhpi);
+ if (vhpi > threshold) {
+		/* interrupt activated */
+ generate_exirq(vcpu);
+ }
+}
+
+
+void leave_hypervisor_tail(void)
+{
+ struct kvm_vcpu *v = current_vcpu;
+
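+	/*
+	 * Emulate the guest interval timer: the timer fires once the
+	 * virtual ITC passes ITM.  If bit 16 of ITV (the mask bit) is
+	 * set, the event is only recorded as pending.
+	 */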
+ if (VMX(v, timer_check)) {
+ VMX(v, timer_check) = 0;
+ if (VMX(v, itc_check)) {
+ if (vcpu_get_itc(v) > VCPU(v, itm)) {
+ if (!(VCPU(v, itv) & (1 << 16))) {
+ vcpu_pend_interrupt(v, VCPU(v, itv)
+ & 0xff);
+ VMX(v, itc_check) = 0;
+ } else {
+ v->arch.timer_pending = 1;
+ }
+ VMX(v, last_itc) = VCPU(v, itm) + 1;
+ }
+ }
+ }
+
+ rmb();
+ if (v->arch.irq_new_pending) {
+ v->arch.irq_new_pending = 0;
+ VMX(v, irq_check) = 0;
+ check_pending_irq(v);
+ return;
+ }
+ if (VMX(v, irq_check)) {
+ VMX(v, irq_check) = 0;
+ vhpi_detection(v);
+ }
+}
+
+
+static inline void handle_lds(struct kvm_pt_regs *regs)
+{
+ regs->cr_ipsr |= IA64_PSR_ED;
+}
+
+void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type)
+{
+ unsigned long pte;
+ union ia64_rr rr;
+
+ rr.val = ia64_get_rr(vadr);
+ pte = vadr & _PAGE_PPN_MASK;
+ pte = pte | PHY_PAGE_WB;
+ thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type);
+ return;
+}
+
+void kvm_page_fault(u64 vadr, u64 vec, struct kvm_pt_regs *regs)
+{
+ unsigned long vpsr;
+ int type;
+
+ u64 vhpt_adr, gppa, pteval, rr, itir;
+ union ia64_isr misr;
+ union ia64_pta vpta;
+ struct thash_data *data;
+ struct kvm_vcpu *v = current_vcpu;
+
+ vpsr = VCPU(v, vpsr);
+ misr.val = VMX(v, cr_isr);
+
+ type = vec;
+
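+	/*
+	 * (vadr << 1) >> 62 extracts bits 62:61 of the address, so the
+	 * test below matches only regions 0 and 4 (bit 63 is shifted
+	 * out), the regions used for physical mode emulation.
+	 */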
+ if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
+ if (vec == 2) {
+ if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
+ emulate_io_inst(v, ((vadr << 1) >> 1), 4);
+ return;
+ }
+ }
+ physical_tlb_miss(v, vadr, type);
+ return;
+ }
+ data = vtlb_lookup(v, vadr, type);
+	if (data) {
+ if (type == D_TLB) {
+ gppa = (vadr & ((1UL << data->ps) - 1))
+ + (data->ppn >> (data->ps - 12) << data->ps);
+ if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
+ if (data->pl >= ((regs->cr_ipsr >>
+ IA64_PSR_CPL0_BIT) & 3))
+ emulate_io_inst(v, gppa, data->ma);
+ else {
+ vcpu_set_isr(v, misr.val);
+ data_access_rights(v, vadr);
+ }
+			return;
+ }
+ }
+ thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
+
+ } else if (type == D_TLB) {
+ if (misr.sp) {
+ handle_lds(regs);
+ return;
+ }
+
+ rr = vcpu_get_rr(v, vadr);
+ itir = rr & (RR_RID_MASK | RR_PS_MASK);
+
+ if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
+ if (vpsr & IA64_PSR_IC) {
+ vcpu_set_isr(v, misr.val);
+ alt_dtlb(v, vadr);
+ } else {
+ nested_dtlb(v);
+ }
+			return;
+ }
+
+ vpta.val = vcpu_get_pta(v);
+ /* avoid recursively walking (short format) VHPT */
+
+ vhpt_adr = vcpu_thash(v, vadr);
+ if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
+ /* VHPT successfully read. */
+ if (!(pteval & _PAGE_P)) {
+ if (vpsr & IA64_PSR_IC) {
+ vcpu_set_isr(v, misr.val);
+ dtlb_fault(v, vadr);
+ } else {
+ nested_dtlb(v);
+ }
+ } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
+ thash_purge_and_insert(v, pteval, itir,
+ vadr, D_TLB);
+ } else if (vpsr & IA64_PSR_IC) {
+ vcpu_set_isr(v, misr.val);
+ dtlb_fault(v, vadr);
+ } else {
+ nested_dtlb(v);
+ }
+ } else {
+ /* Can't read VHPT. */
+ if (vpsr & IA64_PSR_IC) {
+ vcpu_set_isr(v, misr.val);
+ dvhpt_fault(v, vadr);
+ } else {
+ nested_dtlb(v);
+ }
+ }
+ } else if (type == I_TLB) {
+ if (!(vpsr & IA64_PSR_IC))
+ misr.ni = 1;
+ if (!vhpt_enabled(v, vadr, INST_REF)) {
+ vcpu_set_isr(v, misr.val);
+ alt_itlb(v, vadr);
+ return;
+ }
+
+ vpta.val = vcpu_get_pta(v);
+
+ vhpt_adr = vcpu_thash(v, vadr);
+ if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
+ /* VHPT successfully read. */
+ if (pteval & _PAGE_P) {
+ if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
+ vcpu_set_isr(v, misr.val);
+ itlb_fault(v, vadr);
+					return;
+ }
+ rr = vcpu_get_rr(v, vadr);
+ itir = rr & (RR_RID_MASK | RR_PS_MASK);
+ thash_purge_and_insert(v, pteval, itir,
+ vadr, I_TLB);
+ } else {
+ vcpu_set_isr(v, misr.val);
+ inst_page_not_present(v, vadr);
+ }
+ } else {
+ vcpu_set_isr(v, misr.val);
+ ivhpt_fault(v, vadr);
+ }
+ }
+}
+
+void kvm_vexirq(struct kvm_vcpu *vcpu)
+{
+ u64 vpsr, isr;
+ struct kvm_pt_regs *regs;
+
+ regs = vcpu_regs(vcpu);
+ vpsr = VCPU(vcpu, vpsr);
+ isr = vpsr & IA64_PSR_RI;
+ reflect_interruption(0, isr, 0, 12, regs); /*EXT IRQ*/
+}
+
+void kvm_ia64_handle_irq(struct kvm_vcpu *v)
+{
+ struct exit_ctl_data *p = &v->arch.exit_data;
+ long psr;
+
+ local_irq_save(psr);
+ p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
+ vmm_transition(v);
+ local_irq_restore(psr);
+
+ VMX(v, timer_check) = 1;
+}
+
+static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
+{
+ u64 oldrid, moldrid, oldpsbits, vaddr;
+ struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];
+ vaddr = p->vaddr;
+
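+	/*
+	 * Temporarily switch region 0 to the requester's rr value (and
+	 * matching page-size bits) so that the purge below operates on
+	 * the RID the remote vcpu used for the mapping.
+	 */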
+ oldrid = VMX(v, vrr[0]);
+ VMX(v, vrr[0]) = p->rr;
+ oldpsbits = VMX(v, psbits[0]);
+ VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
+ moldrid = ia64_get_rr(0x0);
+ ia64_set_rr(0x0, vrrtomrr(p->rr));
+ ia64_srlz_d();
+
+ vaddr = PAGEALIGN(vaddr, p->ps);
+ thash_purge_entries_remote(v, vaddr, p->ps);
+
+ VMX(v, vrr[0]) = oldrid;
+ VMX(v, psbits[0]) = oldpsbits;
+ ia64_set_rr(0x0, moldrid);
+ ia64_dv_serialize_data();
+}
+
+static void vcpu_do_resume(struct kvm_vcpu *vcpu)
+{
+	/* Re-initialize the VHPT and VTLB on resume */
+ vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
+ thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
+ vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
+ thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);
+
+ ia64_set_pta(vcpu->arch.vhpt.pta.val);
+}
+
+static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
+{
+ if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
+ vcpu_do_resume(vcpu);
+ return;
+ }
+
+ if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
+ thash_purge_all(vcpu);
+ return;
+ }
+
+ if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
+ while (vcpu->arch.ptc_g_count > 0)
+ ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
+ }
+}
+
+void vmm_transition(struct kvm_vcpu *vcpu)
+{
+ ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
+ 0, 0, 0, 0, 0, 0);
+ vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
+ ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
+ 0, 0, 0, 0, 0, 0);
+ kvm_do_resume_op(vcpu);
+}
diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S
new file mode 100644
index 00000000000..30897d44d61
--- /dev/null
+++ b/arch/ia64/kvm/trampoline.S
@@ -0,0 +1,1038 @@
+/* Save all processor states
+ *
+ * Copyright (c) 2007 Fleming Feng <fleming.feng@intel.com>
+ * Copyright (c) 2007 Anthony Xu <anthony.xu@intel.com>
+ */
+
+#include <asm/asmmacro.h>
+#include "asm-offsets.h"
+
+
+#define CTX(name) VMM_CTX_##name##_OFFSET
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_BRANCH_REGS \
+ add r2 = CTX(B0),r32; \
+ add r3 = CTX(B1),r32; \
+ mov r16 = b0; \
+ mov r17 = b1; \
+ ;; \
+ st8 [r2]=r16,16; \
+ st8 [r3]=r17,16; \
+ ;; \
+ mov r16 = b2; \
+ mov r17 = b3; \
+ ;; \
+ st8 [r2]=r16,16; \
+ st8 [r3]=r17,16; \
+ ;; \
+ mov r16 = b4; \
+ mov r17 = b5; \
+ ;; \
+ st8 [r2]=r16; \
+ st8 [r3]=r17; \
+ ;;
+
+ /*
+ * r33: context_t base address
+ */
+#define RESTORE_BRANCH_REGS \
+ add r2 = CTX(B0),r33; \
+ add r3 = CTX(B1),r33; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov b0 = r16; \
+ mov b1 = r17; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov b2 = r16; \
+ mov b3 = r17; \
+ ;; \
+ ld8 r16=[r2]; \
+ ld8 r17=[r3]; \
+ ;; \
+ mov b4=r16; \
+ mov b5=r17; \
+ ;;
+
+
+ /*
+ * r32: context_t base address
+ * bsw == 1
+ * Save all bank1 general registers, r4 ~ r7
+ */
+#define SAVE_GENERAL_REGS \
+ add r2=CTX(R4),r32; \
+ add r3=CTX(R5),r32; \
+ ;; \
+.mem.offset 0,0; \
+ st8.spill [r2]=r4,16; \
+.mem.offset 8,0; \
+ st8.spill [r3]=r5,16; \
+ ;; \
+.mem.offset 0,0; \
+ st8.spill [r2]=r6,48; \
+.mem.offset 8,0; \
+ st8.spill [r3]=r7,48; \
+ ;; \
+.mem.offset 0,0; \
+ st8.spill [r2]=r12; \
+.mem.offset 8,0; \
+ st8.spill [r3]=r13; \
+ ;;
+
+ /*
+ * r33: context_t base address
+ * bsw == 1
+ */
+#define RESTORE_GENERAL_REGS \
+ add r2=CTX(R4),r33; \
+ add r3=CTX(R5),r33; \
+ ;; \
+ ld8.fill r4=[r2],16; \
+ ld8.fill r5=[r3],16; \
+ ;; \
+ ld8.fill r6=[r2],48; \
+ ld8.fill r7=[r3],48; \
+ ;; \
+ ld8.fill r12=[r2]; \
+ ld8.fill r13 =[r3]; \
+ ;;
+
+
+
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_KERNEL_REGS \
+ add r2 = CTX(KR0),r32; \
+ add r3 = CTX(KR1),r32; \
+ mov r16 = ar.k0; \
+ mov r17 = ar.k1; \
+ ;; \
+ st8 [r2] = r16,16; \
+ st8 [r3] = r17,16; \
+ ;; \
+ mov r16 = ar.k2; \
+ mov r17 = ar.k3; \
+ ;; \
+ st8 [r2] = r16,16; \
+ st8 [r3] = r17,16; \
+ ;; \
+ mov r16 = ar.k4; \
+ mov r17 = ar.k5; \
+ ;; \
+ st8 [r2] = r16,16; \
+ st8 [r3] = r17,16; \
+ ;; \
+ mov r16 = ar.k6; \
+ mov r17 = ar.k7; \
+ ;; \
+ st8 [r2] = r16; \
+ st8 [r3] = r17; \
+ ;;
+
+
+
+ /*
+ * r33: context_t base address
+ */
+#define RESTORE_KERNEL_REGS \
+ add r2 = CTX(KR0),r33; \
+ add r3 = CTX(KR1),r33; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov ar.k0=r16; \
+ mov ar.k1=r17; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov ar.k2=r16; \
+ mov ar.k3=r17; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov ar.k4=r16; \
+ mov ar.k5=r17; \
+ ;; \
+ ld8 r16=[r2],16; \
+ ld8 r17=[r3],16; \
+ ;; \
+ mov ar.k6=r16; \
+ mov ar.k7=r17; \
+ ;;
+
+
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_APP_REGS \
+ add r2 = CTX(BSPSTORE),r32; \
+ mov r16 = ar.bspstore; \
+ ;; \
+ st8 [r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\
+ mov r16 = ar.rnat; \
+ ;; \
+ st8 [r2] = r16,CTX(FCR)-CTX(RNAT); \
+ mov r16 = ar.fcr; \
+ ;; \
+ st8 [r2] = r16,CTX(EFLAG)-CTX(FCR); \
+ mov r16 = ar.eflag; \
+ ;; \
+ st8 [r2] = r16,CTX(CFLG)-CTX(EFLAG); \
+ mov r16 = ar.cflg; \
+ ;; \
+ st8 [r2] = r16,CTX(FSR)-CTX(CFLG); \
+ mov r16 = ar.fsr; \
+ ;; \
+ st8 [r2] = r16,CTX(FIR)-CTX(FSR); \
+ mov r16 = ar.fir; \
+ ;; \
+ st8 [r2] = r16,CTX(FDR)-CTX(FIR); \
+ mov r16 = ar.fdr; \
+ ;; \
+ st8 [r2] = r16,CTX(UNAT)-CTX(FDR); \
+ mov r16 = ar.unat; \
+ ;; \
+ st8 [r2] = r16,CTX(FPSR)-CTX(UNAT); \
+ mov r16 = ar.fpsr; \
+ ;; \
+ st8 [r2] = r16,CTX(PFS)-CTX(FPSR); \
+ mov r16 = ar.pfs; \
+ ;; \
+ st8 [r2] = r16,CTX(LC)-CTX(PFS); \
+ mov r16 = ar.lc; \
+ ;; \
+ st8 [r2] = r16; \
+ ;;
+
+ /*
+ * r33: context_t base address
+ */
+#define RESTORE_APP_REGS \
+ add r2=CTX(BSPSTORE),r33; \
+ ;; \
+ ld8 r16=[r2],CTX(RNAT)-CTX(BSPSTORE); \
+ ;; \
+ mov ar.bspstore=r16; \
+ ld8 r16=[r2],CTX(FCR)-CTX(RNAT); \
+ ;; \
+ mov ar.rnat=r16; \
+ ld8 r16=[r2],CTX(EFLAG)-CTX(FCR); \
+ ;; \
+ mov ar.fcr=r16; \
+ ld8 r16=[r2],CTX(CFLG)-CTX(EFLAG); \
+ ;; \
+ mov ar.eflag=r16; \
+ ld8 r16=[r2],CTX(FSR)-CTX(CFLG); \
+ ;; \
+ mov ar.cflg=r16; \
+ ld8 r16=[r2],CTX(FIR)-CTX(FSR); \
+ ;; \
+ mov ar.fsr=r16; \
+ ld8 r16=[r2],CTX(FDR)-CTX(FIR); \
+ ;; \
+ mov ar.fir=r16; \
+ ld8 r16=[r2],CTX(UNAT)-CTX(FDR); \
+ ;; \
+ mov ar.fdr=r16; \
+ ld8 r16=[r2],CTX(FPSR)-CTX(UNAT); \
+ ;; \
+ mov ar.unat=r16; \
+ ld8 r16=[r2],CTX(PFS)-CTX(FPSR); \
+ ;; \
+ mov ar.fpsr=r16; \
+ ld8 r16=[r2],CTX(LC)-CTX(PFS); \
+ ;; \
+ mov ar.pfs=r16; \
+ ld8 r16=[r2]; \
+ ;; \
+ mov ar.lc=r16; \
+ ;;
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_CTL_REGS \
+ add r2 = CTX(DCR),r32; \
+ mov r16 = cr.dcr; \
+ ;; \
+ st8 [r2] = r16,CTX(IVA)-CTX(DCR); \
+ ;; \
+ mov r16 = cr.iva; \
+ ;; \
+ st8 [r2] = r16,CTX(PTA)-CTX(IVA); \
+ ;; \
+ mov r16 = cr.pta; \
+ ;; \
+ st8 [r2] = r16 ; \
+ ;;
+
+ /*
+ * r33: context_t base address
+ */
+#define RESTORE_CTL_REGS \
+ add r2 = CTX(DCR),r33; \
+ ;; \
+ ld8 r16 = [r2],CTX(IVA)-CTX(DCR); \
+ ;; \
+ mov cr.dcr = r16; \
+ dv_serialize_data; \
+ ;; \
+ ld8 r16 = [r2],CTX(PTA)-CTX(IVA); \
+ ;; \
+ mov cr.iva = r16; \
+ dv_serialize_data; \
+ ;; \
+ ld8 r16 = [r2]; \
+ ;; \
+ mov cr.pta = r16; \
+ dv_serialize_data; \
+ ;;
+
+
+ /*
+ * r32: context_t base address
+ */
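+	/*
+	 * dep.z r18=N,61,3 forms the virtual address whose region number
+	 * (bits 63:61) is N, which is how rr[N] is addressed.  rr6 is
+	 * skipped here (the store pointer jumps by 16 past its slot),
+	 * and RESTORE_REGION_REGS skips it the same way.
+	 */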
+#define SAVE_REGION_REGS \
+ add r2=CTX(RR0),r32; \
+ mov r16=rr[r0]; \
+ dep.z r18=1,61,3; \
+ ;; \
+ st8 [r2]=r16,8; \
+ mov r17=rr[r18]; \
+ dep.z r18=2,61,3; \
+ ;; \
+ st8 [r2]=r17,8; \
+ mov r16=rr[r18]; \
+ dep.z r18=3,61,3; \
+ ;; \
+ st8 [r2]=r16,8; \
+ mov r17=rr[r18]; \
+ dep.z r18=4,61,3; \
+ ;; \
+ st8 [r2]=r17,8; \
+ mov r16=rr[r18]; \
+ dep.z r18=5,61,3; \
+ ;; \
+ st8 [r2]=r16,8; \
+ mov r17=rr[r18]; \
+ dep.z r18=7,61,3; \
+ ;; \
+ st8 [r2]=r17,16; \
+ mov r16=rr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ ;;
+
+ /*
+ * r33:context_t base address
+ */
+#define RESTORE_REGION_REGS \
+ add r2=CTX(RR0),r33;\
+ mov r18=r0; \
+ ;; \
+ ld8 r20=[r2],8; \
+ ;; /* rr0 */ \
+ ld8 r21=[r2],8; \
+ ;; /* rr1 */ \
+ ld8 r22=[r2],8; \
+ ;; /* rr2 */ \
+ ld8 r23=[r2],8; \
+ ;; /* rr3 */ \
+ ld8 r24=[r2],8; \
+ ;; /* rr4 */ \
+ ld8 r25=[r2],16; \
+ ;; /* rr5 */ \
+ ld8 r27=[r2]; \
+ ;; /* rr7 */ \
+ mov rr[r18]=r20; \
+ dep.z r18=1,61,3; \
+ ;; /* rr1 */ \
+ mov rr[r18]=r21; \
+ dep.z r18=2,61,3; \
+ ;; /* rr2 */ \
+ mov rr[r18]=r22; \
+ dep.z r18=3,61,3; \
+ ;; /* rr3 */ \
+ mov rr[r18]=r23; \
+ dep.z r18=4,61,3; \
+ ;; /* rr4 */ \
+ mov rr[r18]=r24; \
+ dep.z r18=5,61,3; \
+ ;; /* rr5 */ \
+ mov rr[r18]=r25; \
+ dep.z r18=7,61,3; \
+ ;; /* rr7 */ \
+ mov rr[r18]=r27; \
+ ;; \
+ srlz.i; \
+ ;;
+
+
+
+ /*
+ * r32: context_t base address
+	 * r2, r3, r16-r18: scratch registers
+ */
+#define SAVE_DEBUG_REGS \
+ add r2=CTX(IBR0),r32; \
+ add r3=CTX(DBR0),r32; \
+ mov r16=ibr[r0]; \
+ mov r17=dbr[r0]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=1,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=2,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=3,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=4,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=5,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=6,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ add r18=7,r0; \
+ ;; \
+ mov r16=ibr[r18]; \
+ mov r17=dbr[r18]; \
+ ;; \
+ st8 [r2]=r16,8; \
+ st8 [r3]=r17,8; \
+ ;;
+
+
+/*
+ * r33: points to the context_t structure
+ * ar.lc is corrupted.
+ */
+#define RESTORE_DEBUG_REGS \
+ add r2=CTX(IBR0),r33; \
+ add r3=CTX(DBR0),r33; \
+ mov r16=7; \
+ mov r17=r0; \
+ ;; \
+ mov ar.lc = r16; \
+ ;; \
+1: \
+ ld8 r18=[r2],8; \
+ ld8 r19=[r3],8; \
+ ;; \
+ mov ibr[r17]=r18; \
+ mov dbr[r17]=r19; \
+ ;; \
+ srlz.i; \
+ ;; \
+ add r17=1,r17; \
+ br.cloop.sptk 1b; \
+ ;;
+
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_FPU_LOW \
+ add r2=CTX(F2),r32; \
+ add r3=CTX(F3),r32; \
+ ;; \
+ stf.spill.nta [r2]=f2,32; \
+ stf.spill.nta [r3]=f3,32; \
+ ;; \
+ stf.spill.nta [r2]=f4,32; \
+ stf.spill.nta [r3]=f5,32; \
+ ;; \
+ stf.spill.nta [r2]=f6,32; \
+ stf.spill.nta [r3]=f7,32; \
+ ;; \
+ stf.spill.nta [r2]=f8,32; \
+ stf.spill.nta [r3]=f9,32; \
+ ;; \
+ stf.spill.nta [r2]=f10,32; \
+ stf.spill.nta [r3]=f11,32; \
+ ;; \
+ stf.spill.nta [r2]=f12,32; \
+ stf.spill.nta [r3]=f13,32; \
+ ;; \
+ stf.spill.nta [r2]=f14,32; \
+ stf.spill.nta [r3]=f15,32; \
+ ;; \
+ stf.spill.nta [r2]=f16,32; \
+ stf.spill.nta [r3]=f17,32; \
+ ;; \
+ stf.spill.nta [r2]=f18,32; \
+ stf.spill.nta [r3]=f19,32; \
+ ;; \
+ stf.spill.nta [r2]=f20,32; \
+ stf.spill.nta [r3]=f21,32; \
+ ;; \
+ stf.spill.nta [r2]=f22,32; \
+ stf.spill.nta [r3]=f23,32; \
+ ;; \
+ stf.spill.nta [r2]=f24,32; \
+ stf.spill.nta [r3]=f25,32; \
+ ;; \
+ stf.spill.nta [r2]=f26,32; \
+ stf.spill.nta [r3]=f27,32; \
+ ;; \
+ stf.spill.nta [r2]=f28,32; \
+ stf.spill.nta [r3]=f29,32; \
+ ;; \
+ stf.spill.nta [r2]=f30; \
+ stf.spill.nta [r3]=f31; \
+ ;;
+
+ /*
+ * r32: context_t base address
+ */
+#define SAVE_FPU_HIGH \
+ add r2=CTX(F32),r32; \
+ add r3=CTX(F33),r32; \
+ ;; \
+ stf.spill.nta [r2]=f32,32; \
+ stf.spill.nta [r3]=f33,32; \
+ ;; \
+ stf.spill.nta [r2]=f34,32; \
+ stf.spill.nta [r3]=f35,32; \
+ ;; \
+ stf.spill.nta [r2]=f36,32; \
+ stf.spill.nta [r3]=f37,32; \
+ ;; \
+ stf.spill.nta [r2]=f38,32; \
+ stf.spill.nta [r3]=f39,32; \
+ ;; \
+ stf.spill.nta [r2]=f40,32; \
+ stf.spill.nta [r3]=f41,32; \
+ ;; \
+ stf.spill.nta [r2]=f42,32; \
+ stf.spill.nta [r3]=f43,32; \
+ ;; \
+ stf.spill.nta [r2]=f44,32; \
+ stf.spill.nta [r3]=f45,32; \
+ ;; \
+ stf.spill.nta [r2]=f46,32; \
+ stf.spill.nta [r3]=f47,32; \
+ ;; \
+ stf.spill.nta [r2]=f48,32; \
+ stf.spill.nta [r3]=f49,32; \
+ ;; \
+ stf.spill.nta [r2]=f50,32; \
+ stf.spill.nta [r3]=f51,32; \
+ ;; \
+ stf.spill.nta [r2]=f52,32; \
+ stf.spill.nta [r3]=f53,32; \
+ ;; \
+ stf.spill.nta [r2]=f54,32; \
+ stf.spill.nta [r3]=f55,32; \
+ ;; \
+ stf.spill.nta [r2]=f56,32; \
+ stf.spill.nta [r3]=f57,32; \
+ ;; \
+ stf.spill.nta [r2]=f58,32; \
+ stf.spill.nta [r3]=f59,32; \
+ ;; \
+ stf.spill.nta [r2]=f60,32; \
+ stf.spill.nta [r3]=f61,32; \
+ ;; \
+ stf.spill.nta [r2]=f62,32; \
+ stf.spill.nta [r3]=f63,32; \
+ ;; \
+ stf.spill.nta [r2]=f64,32; \
+ stf.spill.nta [r3]=f65,32; \
+ ;; \
+ stf.spill.nta [r2]=f66,32; \
+ stf.spill.nta [r3]=f67,32; \
+ ;; \
+ stf.spill.nta [r2]=f68,32; \
+ stf.spill.nta [r3]=f69,32; \
+ ;; \
+ stf.spill.nta [r2]=f70,32; \
+ stf.spill.nta [r3]=f71,32; \
+ ;; \
+ stf.spill.nta [r2]=f72,32; \
+ stf.spill.nta [r3]=f73,32; \
+ ;; \
+ stf.spill.nta [r2]=f74,32; \
+ stf.spill.nta [r3]=f75,32; \
+ ;; \
+ stf.spill.nta [r2]=f76,32; \
+ stf.spill.nta [r3]=f77,32; \
+ ;; \
+ stf.spill.nta [r2]=f78,32; \
+ stf.spill.nta [r3]=f79,32; \
+ ;; \
+ stf.spill.nta [r2]=f80,32; \
+ stf.spill.nta [r3]=f81,32; \
+ ;; \
+ stf.spill.nta [r2]=f82,32; \
+ stf.spill.nta [r3]=f83,32; \
+ ;; \
+ stf.spill.nta [r2]=f84,32; \
+ stf.spill.nta [r3]=f85,32; \
+ ;; \
+ stf.spill.nta [r2]=f86,32; \
+ stf.spill.nta [r3]=f87,32; \
+ ;; \
+ stf.spill.nta [r2]=f88,32; \
+ stf.spill.nta [r3]=f89,32; \
+ ;; \
+ stf.spill.nta [r2]=f90,32; \
+ stf.spill.nta [r3]=f91,32; \
+ ;; \
+ stf.spill.nta [r2]=f92,32; \
+ stf.spill.nta [r3]=f93,32; \
+ ;; \
+ stf.spill.nta [r2]=f94,32; \
+ stf.spill.nta [r3]=f95,32; \
+ ;; \
+ stf.spill.nta [r2]=f96,32; \
+ stf.spill.nta [r3]=f97,32; \
+ ;; \
+ stf.spill.nta [r2]=f98,32; \
+ stf.spill.nta [r3]=f99,32; \
+ ;; \
+ stf.spill.nta [r2]=f100,32; \
+ stf.spill.nta [r3]=f101,32; \
+ ;; \
+ stf.spill.nta [r2]=f102,32; \
+ stf.spill.nta [r3]=f103,32; \
+ ;; \
+ stf.spill.nta [r2]=f104,32; \
+ stf.spill.nta [r3]=f105,32; \
+ ;; \
+ stf.spill.nta [r2]=f106,32; \
+ stf.spill.nta [r3]=f107,32; \
+ ;; \
+ stf.spill.nta [r2]=f108,32; \
+ stf.spill.nta [r3]=f109,32; \
+ ;; \
+ stf.spill.nta [r2]=f110,32; \
+ stf.spill.nta [r3]=f111,32; \
+ ;; \
+ stf.spill.nta [r2]=f112,32; \
+ stf.spill.nta [r3]=f113,32; \
+ ;; \
+ stf.spill.nta [r2]=f114,32; \
+ stf.spill.nta [r3]=f115,32; \
+ ;; \
+ stf.spill.nta [r2]=f116,32; \
+ stf.spill.nta [r3]=f117,32; \
+ ;; \
+ stf.spill.nta [r2]=f118,32; \
+ stf.spill.nta [r3]=f119,32; \
+ ;; \
+ stf.spill.nta [r2]=f120,32; \
+ stf.spill.nta [r3]=f121,32; \
+ ;; \
+ stf.spill.nta [r2]=f122,32; \
+ stf.spill.nta [r3]=f123,32; \
+ ;; \
+ stf.spill.nta [r2]=f124,32; \
+ stf.spill.nta [r3]=f125,32; \
+ ;; \
+ stf.spill.nta [r2]=f126; \
+ stf.spill.nta [r3]=f127; \
+ ;;
+
+ /*
+	 * r33: points to the context_t structure
+ */
+#define RESTORE_FPU_LOW \
+ add r2 = CTX(F2), r33; \
+ add r3 = CTX(F3), r33; \
+ ;; \
+ ldf.fill.nta f2 = [r2], 32; \
+ ldf.fill.nta f3 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f4 = [r2], 32; \
+ ldf.fill.nta f5 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f6 = [r2], 32; \
+ ldf.fill.nta f7 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f8 = [r2], 32; \
+ ldf.fill.nta f9 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f10 = [r2], 32; \
+ ldf.fill.nta f11 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f12 = [r2], 32; \
+ ldf.fill.nta f13 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f14 = [r2], 32; \
+ ldf.fill.nta f15 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f16 = [r2], 32; \
+ ldf.fill.nta f17 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f18 = [r2], 32; \
+ ldf.fill.nta f19 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f20 = [r2], 32; \
+ ldf.fill.nta f21 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f22 = [r2], 32; \
+ ldf.fill.nta f23 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f24 = [r2], 32; \
+ ldf.fill.nta f25 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f26 = [r2], 32; \
+ ldf.fill.nta f27 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f28 = [r2], 32; \
+ ldf.fill.nta f29 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f30 = [r2], 32; \
+ ldf.fill.nta f31 = [r3], 32; \
+ ;;
+
+
+
+ /*
+	 * r33: points to the context_t structure
+ */
+#define RESTORE_FPU_HIGH \
+ add r2 = CTX(F32), r33; \
+ add r3 = CTX(F33), r33; \
+ ;; \
+ ldf.fill.nta f32 = [r2], 32; \
+ ldf.fill.nta f33 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f34 = [r2], 32; \
+ ldf.fill.nta f35 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f36 = [r2], 32; \
+ ldf.fill.nta f37 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f38 = [r2], 32; \
+ ldf.fill.nta f39 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f40 = [r2], 32; \
+ ldf.fill.nta f41 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f42 = [r2], 32; \
+ ldf.fill.nta f43 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f44 = [r2], 32; \
+ ldf.fill.nta f45 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f46 = [r2], 32; \
+ ldf.fill.nta f47 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f48 = [r2], 32; \
+ ldf.fill.nta f49 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f50 = [r2], 32; \
+ ldf.fill.nta f51 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f52 = [r2], 32; \
+ ldf.fill.nta f53 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f54 = [r2], 32; \
+ ldf.fill.nta f55 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f56 = [r2], 32; \
+ ldf.fill.nta f57 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f58 = [r2], 32; \
+ ldf.fill.nta f59 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f60 = [r2], 32; \
+ ldf.fill.nta f61 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f62 = [r2], 32; \
+ ldf.fill.nta f63 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f64 = [r2], 32; \
+ ldf.fill.nta f65 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f66 = [r2], 32; \
+ ldf.fill.nta f67 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f68 = [r2], 32; \
+ ldf.fill.nta f69 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f70 = [r2], 32; \
+ ldf.fill.nta f71 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f72 = [r2], 32; \
+ ldf.fill.nta f73 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f74 = [r2], 32; \
+ ldf.fill.nta f75 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f76 = [r2], 32; \
+ ldf.fill.nta f77 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f78 = [r2], 32; \
+ ldf.fill.nta f79 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f80 = [r2], 32; \
+ ldf.fill.nta f81 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f82 = [r2], 32; \
+ ldf.fill.nta f83 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f84 = [r2], 32; \
+ ldf.fill.nta f85 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f86 = [r2], 32; \
+ ldf.fill.nta f87 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f88 = [r2], 32; \
+ ldf.fill.nta f89 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f90 = [r2], 32; \
+ ldf.fill.nta f91 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f92 = [r2], 32; \
+ ldf.fill.nta f93 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f94 = [r2], 32; \
+ ldf.fill.nta f95 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f96 = [r2], 32; \
+ ldf.fill.nta f97 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f98 = [r2], 32; \
+ ldf.fill.nta f99 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f100 = [r2], 32; \
+ ldf.fill.nta f101 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f102 = [r2], 32; \
+ ldf.fill.nta f103 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f104 = [r2], 32; \
+ ldf.fill.nta f105 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f106 = [r2], 32; \
+ ldf.fill.nta f107 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f108 = [r2], 32; \
+ ldf.fill.nta f109 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f110 = [r2], 32; \
+ ldf.fill.nta f111 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f112 = [r2], 32; \
+ ldf.fill.nta f113 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f114 = [r2], 32; \
+ ldf.fill.nta f115 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f116 = [r2], 32; \
+ ldf.fill.nta f117 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f118 = [r2], 32; \
+ ldf.fill.nta f119 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f120 = [r2], 32; \
+ ldf.fill.nta f121 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f122 = [r2], 32; \
+ ldf.fill.nta f123 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f124 = [r2], 32; \
+ ldf.fill.nta f125 = [r3], 32; \
+ ;; \
+ ldf.fill.nta f126 = [r2], 32; \
+ ldf.fill.nta f127 = [r3], 32; \
+ ;;
+
+	/*
+	 * r32: context_t base address
+	 * ar.lc is corrupted.
+	 */
+#define SAVE_PTK_REGS \
+ add r2=CTX(PKR0), r32; \
+ mov r16=7; \
+ ;; \
+ mov ar.lc=r16; \
+ mov r17=r0; \
+ ;; \
+1: \
+ mov r18=pkr[r17]; \
+ ;; \
+ srlz.i; \
+ ;; \
+ st8 [r2]=r18, 8; \
+ ;; \
+ add r17 =1,r17; \
+ ;; \
+ br.cloop.sptk 1b; \
+ ;;
+
+/*
+ * r33: points to the context_t structure
+ * ar.lc is corrupted.
+ */
+#define RESTORE_PTK_REGS \
+ add r2=CTX(PKR0), r33; \
+ mov r16=7; \
+ ;; \
+ mov ar.lc=r16; \
+ mov r17=r0; \
+ ;; \
+1: \
+ ld8 r18=[r2], 8; \
+ ;; \
+ mov pkr[r17]=r18; \
+ ;; \
+ srlz.i; \
+ ;; \
+ add r17 =1,r17; \
+ ;; \
+ br.cloop.sptk 1b; \
+ ;;
+
+
+/*
+ * void vmm_trampoline( context_t * from,
+ * context_t * to)
+ *
+ * from: r32
+ * to: r33
+ * note: interrupts must be disabled before calling this function.
+ */
+GLOBAL_ENTRY(vmm_trampoline)
+ mov r16 = psr
+ adds r2 = CTX(PSR), r32
+ ;;
+ st8 [r2] = r16, 8 // psr
+ mov r17 = pr
+ ;;
+ st8 [r2] = r17, 8 // pr
+ mov r18 = ar.unat
+ ;;
+ st8 [r2] = r18
+ mov r17 = ar.rsc
+ ;;
+ adds r2 = CTX(RSC),r32
+ ;;
+ st8 [r2]= r17
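+	// put the RSE into enforced lazy mode and flush the dirty
+	// stacked registers to the backing store before saving state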
+ mov ar.rsc =0
+ flushrs
+ ;;
+ SAVE_GENERAL_REGS
+ ;;
+ SAVE_KERNEL_REGS
+ ;;
+ SAVE_APP_REGS
+ ;;
+ SAVE_BRANCH_REGS
+ ;;
+ SAVE_CTL_REGS
+ ;;
+ SAVE_REGION_REGS
+ ;;
+ //SAVE_DEBUG_REGS
+ ;;
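+	// clear psr.dfl/psr.dfh around the fp spills so that accessing
+	// the low and high floating point partitions does not fault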
+ rsm psr.dfl
+ ;;
+ srlz.d
+ ;;
+ SAVE_FPU_LOW
+ ;;
+ rsm psr.dfh
+ ;;
+ srlz.d
+ ;;
+ SAVE_FPU_HIGH
+ ;;
+ SAVE_PTK_REGS
+ ;;
+ RESTORE_PTK_REGS
+ ;;
+ RESTORE_FPU_HIGH
+ ;;
+ RESTORE_FPU_LOW
+ ;;
+ //RESTORE_DEBUG_REGS
+ ;;
+ RESTORE_REGION_REGS
+ ;;
+ RESTORE_CTL_REGS
+ ;;
+ RESTORE_BRANCH_REGS
+ ;;
+ RESTORE_APP_REGS
+ ;;
+ RESTORE_KERNEL_REGS
+ ;;
+ RESTORE_GENERAL_REGS
+ ;;
+ adds r2=CTX(PSR), r33
+ ;;
+ ld8 r16=[r2], 8 // psr
+ ;;
+ mov psr.l=r16
+ ;;
+ srlz.d
+ ;;
+ ld8 r16=[r2], 8 // pr
+ ;;
+ mov pr =r16,-1
+ ld8 r16=[r2] // unat
+ ;;
+ mov ar.unat=r16
+ ;;
+ adds r2=CTX(RSC),r33
+ ;;
+ ld8 r16 =[r2]
+ ;;
+ mov ar.rsc = r16
+ ;;
+ br.ret.sptk.few b0
+END(vmm_trampoline)
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
new file mode 100644
index 00000000000..e44027ce566
--- /dev/null
+++ b/arch/ia64/kvm/vcpu.c
@@ -0,0 +1,2163 @@
+/*
+ * kvm_vcpu.c: handling all virtual cpu related things.
+ * Copyright (c) 2005, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Shaofan Li (Susue Li) <susie.li@intel.com>
+ * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/types.h>
+
+#include <asm/processor.h>
+#include <asm/ia64regs.h>
+#include <asm/gcc_intrin.h>
+#include <asm/kregs.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+#include "asm-offsets.h"
+#include "vcpu.h"
+
+/*
+ * Special notes:
+ * - Index by it/dt/rt sequence
+ * - Only existing mode transitions are allowed in this table
+ * - RSE is placed at lazy mode when emulating guest partial mode
+ * - If gva happens to be in rr0 or rr4, the only allowed case is
+ *   identity mapping (gva = gpa), or panic! (How?)
+ */
+int mm_switch_table[8][8] = {
+ /* 2004/09/12(Kevin): Allow switch to self */
+ /*
+ * (it,dt,rt): (0,0,0) -> (1,1,1)
+ * This kind of transition usually occurs in the very early
+ * stage of Linux boot up procedure. Another case is in efi
+ * and pal calls. (see "arch/ia64/kernel/head.S")
+ *
+ * (it,dt,rt): (0,0,0) -> (0,1,1)
+ *     This kind of transition is found when OSYa exits efi boot
+ *     service.  Since gva = gpa in this case (same region), data
+ *     accesses can be satisfied even though the itlb entry for
+ *     physical emulation is hit.
+ */
+ {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ /*
+ * (it,dt,rt): (0,1,1) -> (1,1,1)
+ * This kind of transition is found in OSYa.
+ *
+ * (it,dt,rt): (0,1,1) -> (0,0,0)
+ *  This kind of transition is found in OSYa.
+ */
+ {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
+ /* (1,0,0)->(1,1,1) */
+ {0, 0, 0, 0, 0, 0, 0, SW_P2V},
+ /*
+ * (it,dt,rt): (1,0,1) -> (1,1,1)
+ * This kind of transition usually occurs when Linux returns
+ * from the low level TLB miss handlers.
+ * (see "arch/ia64/kernel/ivt.S")
+ */
+ {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ /*
+ * (it,dt,rt): (1,1,1) -> (1,0,1)
+ * This kind of transition usually occurs in Linux low level
+ * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
+ *
+ * (it,dt,rt): (1,1,1) -> (0,0,0)
+ * This kind of transition usually occurs in pal and efi calls,
+ * which requires running in physical mode.
+ * (see "arch/ia64/kernel/head.S")
+ * (1,1,1)->(1,0,0)
+ */
+
+ {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
+};
+
+void physical_mode_init(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.mode_flags = GUEST_IN_PHY;
+}
+
+void switch_to_physical_rid(struct kvm_vcpu *vcpu)
+{
+ unsigned long psr;
+
+	/* Switch rr[0] and rr[4] to the metaphysical (physical mode) RIDs */
+ psr = ia64_clear_ic();
+ ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
+ ia64_srlz_d();
+ ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
+ ia64_srlz_d();
+
+ ia64_set_psr(psr);
+ return;
+}
+
+
+void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
+{
+ unsigned long psr;
+
+ psr = ia64_clear_ic();
+ ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
+ ia64_srlz_d();
+ ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
+ ia64_srlz_d();
+ ia64_set_psr(psr);
+ return;
+}
+
+static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
+{
+ return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
+}
+
+void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
+ struct ia64_psr new_psr)
+{
+ int act;
+ act = mm_switch_action(old_psr, new_psr);
+ switch (act) {
+ case SW_V2P:
+ /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
+ old_psr.val, new_psr.val);*/
+ switch_to_physical_rid(vcpu);
+		/*
+		 * Set the RSE to enforced lazy mode to prevent active
+		 * RSE save/restore while the guest is in physical mode.
+		 */
+ vcpu->arch.mode_flags |= GUEST_IN_PHY;
+ break;
+ case SW_P2V:
+ switch_to_virtual_rid(vcpu);
+		/*
+		 * Recover the old mode that was saved when entering
+		 * guest physical mode.
+		 */
+ vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
+ break;
+ case SW_SELF:
+ break;
+ case SW_NOP:
+ break;
+ default:
+ /* Sanity check */
+ break;
+ }
+ return;
+}
+
+
+
+/*
+ * In physical mode, inserting tc/tr entries for regions 0 and 4 uses
+ * RID[0] and RID[4], which are reserved for physical mode emulation.
+ * However, what those inserted tc/tr entries want is the rid for
+ * virtual mode, so the original virtual rid needs to be restored
+ * before the insert.
+ *
+ * Operations which require such a switch include:
+ *  - insertions (itc.*, itr.*)
+ *  - purges (ptc.* and ptr.*)
+ *  - tpa
+ *  - tak
+ *  - thash?, ttag?
+ * All of the above need the actual virtual rid for the destination
+ * entry.
+ */
+
+void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
+ struct ia64_psr new_psr)
+{
+
+ if ((old_psr.dt != new_psr.dt)
+ || (old_psr.it != new_psr.it)
+ || (old_psr.rt != new_psr.rt))
+ switch_mm_mode(vcpu, old_psr, new_psr);
+
+ return;
+}
+
+void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
+{
+ if (is_physical_mode(vcpu)) {
+ vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
+ switch_to_virtual_rid(vcpu);
+ }
+ return;
+}
+
+/* Recover always follows prepare */
+void recover_if_physical_mode(struct kvm_vcpu *vcpu)
+{
+ if (is_physical_mode(vcpu))
+ switch_to_physical_rid(vcpu);
+ vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
+ return;
+}
+
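+/*
+ * RPT(x): offset of field x within struct kvm_pt_regs, computed with
+ * the classic null-pointer offsetof idiom.
+ */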
+#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)
+
+static u16 gr_info[32] = {
+ 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
+ RPT(r1), RPT(r2), RPT(r3),
+ RPT(r4), RPT(r5), RPT(r6), RPT(r7),
+ RPT(r8), RPT(r9), RPT(r10), RPT(r11),
+ RPT(r12), RPT(r13), RPT(r14), RPT(r15),
+ RPT(r16), RPT(r17), RPT(r18), RPT(r19),
+ RPT(r20), RPT(r21), RPT(r22), RPT(r23),
+ RPT(r24), RPT(r25), RPT(r26), RPT(r27),
+ RPT(r28), RPT(r29), RPT(r30), RPT(r31)
+};
+
+#define IA64_FIRST_STACKED_GR 32
+#define IA64_FIRST_ROTATING_FR 32
+
+static inline unsigned long
+rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
+{
+ reg += rrb;
+ if (reg >= sor)
+ reg -= sor;
+ return reg;
+}
+
+/*
+ * Return the (rotated) index for floating point register REGNUM
+ * (REGNUM must be in the range 32-127; the result is in the range
+ * 0-95).
+ */
+static inline unsigned long fph_index(struct kvm_pt_regs *regs,
+ long regnum)
+{
+ unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
+ return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
+}
+
+
+/*
+ * Given a backing store address and a (possibly negative) number of
+ * register slots, skip over the intervening NaT collection slots and
+ * return the resulting backing store address.
+ */
+static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
+ long num_regs)
+{
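+	/*
+	 * The register backing store keeps one NaT collection slot after
+	 * every 63 register slots, so skipping num_regs registers may
+	 * cross such slots; i counts the extra slots to step over.
+	 */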
+ long delta = ia64_rse_slot_num(addr) + num_regs;
+ int i = 0;
+
+ if (num_regs < 0)
+ delta -= 0x3e;
+ if (delta < 0) {
+ while (delta <= -0x3f) {
+ i--;
+ delta += 0x3f;
+ }
+ } else {
+ while (delta >= 0x3f) {
+ i++;
+ delta -= 0x3f;
+ }
+ }
+
+ return addr + num_regs + i;
+}
+
+static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
+ unsigned long *val, int *nat)
+{
+ unsigned long *bsp, *addr, *rnat_addr, *bspstore;
+ unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
+ unsigned long nat_mask;
+ unsigned long old_rsc, new_rsc;
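+	/*
+	 * Decode the current frame from cr.ifs: sof (size of frame) in
+	 * bits 6:0, sor (size of rotating region, in units of 8
+	 * registers) in bits 17:14, rrb.gr (gr rename base) in bits
+	 * 24:18.
+	 */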
+ long sof = (regs->cr_ifs) & 0x7f;
+ long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
+ long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+ long ridx = r1 - 32;
+
+ if (ridx < sor)
+ ridx = rotate_reg(sor, rrb_gr, ridx);
+
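+	/*
+	 * Put the RSE into enforced lazy mode while the backing store is
+	 * read.  loadrs sits in ar.rsc bits 29:16 and counts bytes, so
+	 * regs->loadrs >> 19 (16 to extract the field, 3 to convert
+	 * bytes to slots) gives the number of dirty slots below bsp.
+	 */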
+ old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
+	new_rsc = old_rsc & ~0x3;
+ ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
+
+ bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+ bsp = kbs + (regs->loadrs >> 19);
+
+ addr = kvm_rse_skip_regs(bsp, -sof + ridx);
+ nat_mask = 1UL << ia64_rse_slot_num(addr);
+ rnat_addr = ia64_rse_rnat_addr(addr);
+
+ if (addr >= bspstore) {
+ ia64_flushrs();
+ ia64_mf();
+ bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+ }
+ *val = *addr;
+ if (nat) {
+ if (bspstore < rnat_addr)
+ *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
+ & nat_mask);
+ else
+ *nat = (int)!!((*rnat_addr) & nat_mask);
+ ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
+ }
+}
+
+void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
+ unsigned long val, unsigned long nat)
+{
+ unsigned long *bsp, *bspstore, *addr, *rnat_addr;
+ unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
+ unsigned long nat_mask;
+ unsigned long old_rsc, new_rsc, psr;
+ unsigned long rnat;
+ long sof = (regs->cr_ifs) & 0x7f;
+ long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
+ long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+ long ridx = r1 - 32;
+
+ if (ridx < sor)
+ ridx = rotate_reg(sor, rrb_gr, ridx);
+
+ old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
+ /* put RSC to lazy mode, and set loadrs 0 */
+ new_rsc = old_rsc & (~0x3fff0003);
+ ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
+	bsp = kbs + (regs->loadrs >> 19); /* loadrs in ar.rsc{29:16} (>> 16), bytes to slots (>> 3) */
+
+ addr = kvm_rse_skip_regs(bsp, -sof + ridx);
+ nat_mask = 1UL << ia64_rse_slot_num(addr);
+ rnat_addr = ia64_rse_rnat_addr(addr);
+
+ local_irq_save(psr);
+ bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+ if (addr >= bspstore) {
+
+ ia64_flushrs();
+ ia64_mf();
+ *addr = val;
+ bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+ rnat = ia64_getreg(_IA64_REG_AR_RNAT);
+ if (bspstore < rnat_addr)
+ rnat = rnat & (~nat_mask);
+ else
+ *rnat_addr = (*rnat_addr)&(~nat_mask);
+
+ ia64_mf();
+ ia64_loadrs();
+ ia64_setreg(_IA64_REG_AR_RNAT, rnat);
+ } else {
+ rnat = ia64_getreg(_IA64_REG_AR_RNAT);
+ *addr = val;
+ if (bspstore < rnat_addr)
+ rnat = rnat&(~nat_mask);
+ else
+ *rnat_addr = (*rnat_addr) & (~nat_mask);
+
+ ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
+ ia64_setreg(_IA64_REG_AR_RNAT, rnat);
+ }
+ local_irq_restore(psr);
+ ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
+}
+
+void getreg(unsigned long regnum, unsigned long *val,
+ int *nat, struct kvm_pt_regs *regs)
+{
+ unsigned long addr, *unat;
+ if (regnum >= IA64_FIRST_STACKED_GR) {
+ get_rse_reg(regs, regnum, val, nat);
+ return;
+ }
+
+ /*
+ * Now look at registers in [0-31] range and init correct UNAT
+ */
+ addr = (unsigned long)regs;
+	unat = &regs->eml_unat;
+
+ addr += gr_info[regnum];
+
+ *val = *(unsigned long *)addr;
+ /*
+ * do it only when requested
+ */
+ if (nat)
+ *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
+}
+
+void setreg(unsigned long regnum, unsigned long val,
+ int nat, struct kvm_pt_regs *regs)
+{
+ unsigned long addr;
+ unsigned long bitmask;
+ unsigned long *unat;
+
+ /*
+ * First takes care of stacked registers
+ */
+ if (regnum >= IA64_FIRST_STACKED_GR) {
+ set_rse_reg(regs, regnum, val, nat);
+ return;
+ }
+
+ /*
+ * Now look at registers in [0-31] range and init correct UNAT
+ */
+ addr = (unsigned long)regs;
+ unat = &regs->eml_unat;
+ /*
+ * add offset from base of struct
+ * and do it !
+ */
+ addr += gr_info[regnum];
+
+ *(unsigned long *)addr = val;
+
+	/*
+	 * We need to set or clear the corresponding UNAT bit to fully
+	 * emulate the load/store: UNAT bit_pos = GR[r3]{8:3} (from
+	 * EAS-2.4).
+	 */
+ bitmask = 1UL << ((addr >> 3) & 0x3f);
+ if (nat)
+ *unat |= bitmask;
+ else
+ *unat &= ~bitmask;
+
+}
+
+u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ u64 val;
+
+ if (!reg)
+ return 0;
+	getreg(reg, &val, NULL, regs);
+ return val;
+}
+
+void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ long sof = (regs->cr_ifs) & 0x7f;
+
+ if (!reg)
+ return;
+ if (reg >= sof + 32)
+ return;
+ setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/
+}
+
+void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
+ struct kvm_pt_regs *regs)
+{
+ /* Take floating register rotation into consideration*/
+ if (regnum >= IA64_FIRST_ROTATING_FR)
+ regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
+#define CASE_FIXED_FP(reg) \
+ case (reg) : \
+ ia64_stf_spill(fpval, reg); \
+ break
+
+ switch (regnum) {
+ CASE_FIXED_FP(0);
+ CASE_FIXED_FP(1);
+ CASE_FIXED_FP(2);
+ CASE_FIXED_FP(3);
+ CASE_FIXED_FP(4);
+ CASE_FIXED_FP(5);
+
+ CASE_FIXED_FP(6);
+ CASE_FIXED_FP(7);
+ CASE_FIXED_FP(8);
+ CASE_FIXED_FP(9);
+ CASE_FIXED_FP(10);
+ CASE_FIXED_FP(11);
+
+ CASE_FIXED_FP(12);
+ CASE_FIXED_FP(13);
+ CASE_FIXED_FP(14);
+ CASE_FIXED_FP(15);
+ CASE_FIXED_FP(16);
+ CASE_FIXED_FP(17);
+ CASE_FIXED_FP(18);
+ CASE_FIXED_FP(19);
+ CASE_FIXED_FP(20);
+ CASE_FIXED_FP(21);
+ CASE_FIXED_FP(22);
+ CASE_FIXED_FP(23);
+ CASE_FIXED_FP(24);
+ CASE_FIXED_FP(25);
+ CASE_FIXED_FP(26);
+ CASE_FIXED_FP(27);
+ CASE_FIXED_FP(28);
+ CASE_FIXED_FP(29);
+ CASE_FIXED_FP(30);
+ CASE_FIXED_FP(31);
+ CASE_FIXED_FP(32);
+ CASE_FIXED_FP(33);
+ CASE_FIXED_FP(34);
+ CASE_FIXED_FP(35);
+ CASE_FIXED_FP(36);
+ CASE_FIXED_FP(37);
+ CASE_FIXED_FP(38);
+ CASE_FIXED_FP(39);
+ CASE_FIXED_FP(40);
+ CASE_FIXED_FP(41);
+ CASE_FIXED_FP(42);
+ CASE_FIXED_FP(43);
+ CASE_FIXED_FP(44);
+ CASE_FIXED_FP(45);
+ CASE_FIXED_FP(46);
+ CASE_FIXED_FP(47);
+ CASE_FIXED_FP(48);
+ CASE_FIXED_FP(49);
+ CASE_FIXED_FP(50);
+ CASE_FIXED_FP(51);
+ CASE_FIXED_FP(52);
+ CASE_FIXED_FP(53);
+ CASE_FIXED_FP(54);
+ CASE_FIXED_FP(55);
+ CASE_FIXED_FP(56);
+ CASE_FIXED_FP(57);
+ CASE_FIXED_FP(58);
+ CASE_FIXED_FP(59);
+ CASE_FIXED_FP(60);
+ CASE_FIXED_FP(61);
+ CASE_FIXED_FP(62);
+ CASE_FIXED_FP(63);
+ CASE_FIXED_FP(64);
+ CASE_FIXED_FP(65);
+ CASE_FIXED_FP(66);
+ CASE_FIXED_FP(67);
+ CASE_FIXED_FP(68);
+ CASE_FIXED_FP(69);
+ CASE_FIXED_FP(70);
+ CASE_FIXED_FP(71);
+ CASE_FIXED_FP(72);
+ CASE_FIXED_FP(73);
+ CASE_FIXED_FP(74);
+ CASE_FIXED_FP(75);
+ CASE_FIXED_FP(76);
+ CASE_FIXED_FP(77);
+ CASE_FIXED_FP(78);
+ CASE_FIXED_FP(79);
+ CASE_FIXED_FP(80);
+ CASE_FIXED_FP(81);
+ CASE_FIXED_FP(82);
+ CASE_FIXED_FP(83);
+ CASE_FIXED_FP(84);
+ CASE_FIXED_FP(85);
+ CASE_FIXED_FP(86);
+ CASE_FIXED_FP(87);
+ CASE_FIXED_FP(88);
+ CASE_FIXED_FP(89);
+ CASE_FIXED_FP(90);
+ CASE_FIXED_FP(91);
+ CASE_FIXED_FP(92);
+ CASE_FIXED_FP(93);
+ CASE_FIXED_FP(94);
+ CASE_FIXED_FP(95);
+ CASE_FIXED_FP(96);
+ CASE_FIXED_FP(97);
+ CASE_FIXED_FP(98);
+ CASE_FIXED_FP(99);
+ CASE_FIXED_FP(100);
+ CASE_FIXED_FP(101);
+ CASE_FIXED_FP(102);
+ CASE_FIXED_FP(103);
+ CASE_FIXED_FP(104);
+ CASE_FIXED_FP(105);
+ CASE_FIXED_FP(106);
+ CASE_FIXED_FP(107);
+ CASE_FIXED_FP(108);
+ CASE_FIXED_FP(109);
+ CASE_FIXED_FP(110);
+ CASE_FIXED_FP(111);
+ CASE_FIXED_FP(112);
+ CASE_FIXED_FP(113);
+ CASE_FIXED_FP(114);
+ CASE_FIXED_FP(115);
+ CASE_FIXED_FP(116);
+ CASE_FIXED_FP(117);
+ CASE_FIXED_FP(118);
+ CASE_FIXED_FP(119);
+ CASE_FIXED_FP(120);
+ CASE_FIXED_FP(121);
+ CASE_FIXED_FP(122);
+ CASE_FIXED_FP(123);
+ CASE_FIXED_FP(124);
+ CASE_FIXED_FP(125);
+ CASE_FIXED_FP(126);
+ CASE_FIXED_FP(127);
+ }
+#undef CASE_FIXED_FP
+}
+
+void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
+ struct kvm_pt_regs *regs)
+{
+ /* Take floating register rotation into consideration*/
+ if (regnum >= IA64_FIRST_ROTATING_FR)
+ regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
+
+#define CASE_FIXED_FP(reg) \
+ case (reg) : \
+ ia64_ldf_fill(reg, fpval); \
+ break
+
+ switch (regnum) {
+ CASE_FIXED_FP(2);
+ CASE_FIXED_FP(3);
+ CASE_FIXED_FP(4);
+ CASE_FIXED_FP(5);
+
+ CASE_FIXED_FP(6);
+ CASE_FIXED_FP(7);
+ CASE_FIXED_FP(8);
+ CASE_FIXED_FP(9);
+ CASE_FIXED_FP(10);
+ CASE_FIXED_FP(11);
+
+ CASE_FIXED_FP(12);
+ CASE_FIXED_FP(13);
+ CASE_FIXED_FP(14);
+ CASE_FIXED_FP(15);
+ CASE_FIXED_FP(16);
+ CASE_FIXED_FP(17);
+ CASE_FIXED_FP(18);
+ CASE_FIXED_FP(19);
+ CASE_FIXED_FP(20);
+ CASE_FIXED_FP(21);
+ CASE_FIXED_FP(22);
+ CASE_FIXED_FP(23);
+ CASE_FIXED_FP(24);
+ CASE_FIXED_FP(25);
+ CASE_FIXED_FP(26);
+ CASE_FIXED_FP(27);
+ CASE_FIXED_FP(28);
+ CASE_FIXED_FP(29);
+ CASE_FIXED_FP(30);
+ CASE_FIXED_FP(31);
+ CASE_FIXED_FP(32);
+ CASE_FIXED_FP(33);
+ CASE_FIXED_FP(34);
+ CASE_FIXED_FP(35);
+ CASE_FIXED_FP(36);
+ CASE_FIXED_FP(37);
+ CASE_FIXED_FP(38);
+ CASE_FIXED_FP(39);
+ CASE_FIXED_FP(40);
+ CASE_FIXED_FP(41);
+ CASE_FIXED_FP(42);
+ CASE_FIXED_FP(43);
+ CASE_FIXED_FP(44);
+ CASE_FIXED_FP(45);
+ CASE_FIXED_FP(46);
+ CASE_FIXED_FP(47);
+ CASE_FIXED_FP(48);
+ CASE_FIXED_FP(49);
+ CASE_FIXED_FP(50);
+ CASE_FIXED_FP(51);
+ CASE_FIXED_FP(52);
+ CASE_FIXED_FP(53);
+ CASE_FIXED_FP(54);
+ CASE_FIXED_FP(55);
+ CASE_FIXED_FP(56);
+ CASE_FIXED_FP(57);
+ CASE_FIXED_FP(58);
+ CASE_FIXED_FP(59);
+ CASE_FIXED_FP(60);
+ CASE_FIXED_FP(61);
+ CASE_FIXED_FP(62);
+ CASE_FIXED_FP(63);
+ CASE_FIXED_FP(64);
+ CASE_FIXED_FP(65);
+ CASE_FIXED_FP(66);
+ CASE_FIXED_FP(67);
+ CASE_FIXED_FP(68);
+ CASE_FIXED_FP(69);
+ CASE_FIXED_FP(70);
+ CASE_FIXED_FP(71);
+ CASE_FIXED_FP(72);
+ CASE_FIXED_FP(73);
+ CASE_FIXED_FP(74);
+ CASE_FIXED_FP(75);
+ CASE_FIXED_FP(76);
+ CASE_FIXED_FP(77);
+ CASE_FIXED_FP(78);
+ CASE_FIXED_FP(79);
+ CASE_FIXED_FP(80);
+ CASE_FIXED_FP(81);
+ CASE_FIXED_FP(82);
+ CASE_FIXED_FP(83);
+ CASE_FIXED_FP(84);
+ CASE_FIXED_FP(85);
+ CASE_FIXED_FP(86);
+ CASE_FIXED_FP(87);
+ CASE_FIXED_FP(88);
+ CASE_FIXED_FP(89);
+ CASE_FIXED_FP(90);
+ CASE_FIXED_FP(91);
+ CASE_FIXED_FP(92);
+ CASE_FIXED_FP(93);
+ CASE_FIXED_FP(94);
+ CASE_FIXED_FP(95);
+ CASE_FIXED_FP(96);
+ CASE_FIXED_FP(97);
+ CASE_FIXED_FP(98);
+ CASE_FIXED_FP(99);
+ CASE_FIXED_FP(100);
+ CASE_FIXED_FP(101);
+ CASE_FIXED_FP(102);
+ CASE_FIXED_FP(103);
+ CASE_FIXED_FP(104);
+ CASE_FIXED_FP(105);
+ CASE_FIXED_FP(106);
+ CASE_FIXED_FP(107);
+ CASE_FIXED_FP(108);
+ CASE_FIXED_FP(109);
+ CASE_FIXED_FP(110);
+ CASE_FIXED_FP(111);
+ CASE_FIXED_FP(112);
+ CASE_FIXED_FP(113);
+ CASE_FIXED_FP(114);
+ CASE_FIXED_FP(115);
+ CASE_FIXED_FP(116);
+ CASE_FIXED_FP(117);
+ CASE_FIXED_FP(118);
+ CASE_FIXED_FP(119);
+ CASE_FIXED_FP(120);
+ CASE_FIXED_FP(121);
+ CASE_FIXED_FP(122);
+ CASE_FIXED_FP(123);
+ CASE_FIXED_FP(124);
+ CASE_FIXED_FP(125);
+ CASE_FIXED_FP(126);
+ CASE_FIXED_FP(127);
+ }
+}
+
+void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
+ struct ia64_fpreg *val)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ getfpreg(reg, val, regs); /* FIXME: handle NATs later*/
+}
+
+void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
+ struct ia64_fpreg *val)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ if (reg > 1)
+ setfpreg(reg, val, regs); /* FIXME: handle NATs later*/
+}
+
+/************************************************************************
+ * lsapic timer
+ ***********************************************************************/
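+/*
+ * The guest ITC is the host ITC plus a per-vcpu offset.  last_itc
+ * tracks the highest value ever returned, so that the guest's view of
+ * the cycle counter never moves backwards (e.g. after the offset is
+ * rewritten by vcpu_set_itc).
+ */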
+u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
+{
+	unsigned long guest_itc;
+
+	guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
+
+ if (guest_itc >= VMX(vcpu, last_itc)) {
+ VMX(vcpu, last_itc) = guest_itc;
+ return guest_itc;
+ } else
+ return VMX(vcpu, last_itc);
+}
+
+static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
+static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
+{
+ struct kvm_vcpu *v;
+ int i;
+ long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
+ unsigned long vitv = VCPU(vcpu, itv);
+
+ if (vcpu->vcpu_id == 0) {
+ for (i = 0; i < MAX_VCPU_NUM; i++) {
+ v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+ VMX(v, itc_offset) = itc_offset;
+ VMX(v, last_itc) = 0;
+ }
+ }
+ VMX(vcpu, last_itc) = 0;
+ if (VCPU(vcpu, itm) <= val) {
+ VMX(vcpu, itc_check) = 0;
+ vcpu_unpend_interrupt(vcpu, vitv);
+ } else {
+ VMX(vcpu, itc_check) = 1;
+ vcpu_set_itm(vcpu, VCPU(vcpu, itm));
+ }
+
+}
+
+static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, itm));
+}
+
+static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
+{
+ unsigned long vitv = VCPU(vcpu, itv);
+ VCPU(vcpu, itm) = val;
+
+ if (val > vcpu_get_itc(vcpu)) {
+ VMX(vcpu, itc_check) = 1;
+ vcpu_unpend_interrupt(vcpu, vitv);
+ VMX(vcpu, timer_pending) = 0;
+ } else
+ VMX(vcpu, itc_check) = 0;
+}
+
+#define ITV_VECTOR(itv) (itv&0xff)
+#define ITV_IRQ_MASK(itv) (itv&(1<<16))
+
+static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, itv) = val;
+ if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
+ vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
+ vcpu->arch.timer_pending = 0;
+ }
+}
+
+static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
+{
+ int vec;
+
+ vec = highest_inservice_irq(vcpu);
+ if (vec == NULL_VECTOR)
+ return;
+ VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
+ VCPU(vcpu, eoi) = 0;
+ vcpu->arch.irq_new_pending = 1;
+
+}
+
+/* See Table 5-8 in SDM vol2 for the definition */
+int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
+{
+ union ia64_tpr vtpr;
+
+ vtpr.val = VCPU(vcpu, tpr);
+
+ if (h_inservice == NMI_VECTOR)
+ return IRQ_MASKED_BY_INSVC;
+
+ if (h_pending == NMI_VECTOR) {
+ /* Non Maskable Interrupt */
+ return IRQ_NO_MASKED;
+ }
+
+ if (h_inservice == ExtINT_VECTOR)
+ return IRQ_MASKED_BY_INSVC;
+
+ if (h_pending == ExtINT_VECTOR) {
+ if (vtpr.mmi) {
+ /* mask all external IRQ */
+ return IRQ_MASKED_BY_VTPR;
+ } else
+ return IRQ_NO_MASKED;
+ }
+
+ if (is_higher_irq(h_pending, h_inservice)) {
+ if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
+ return IRQ_NO_MASKED;
+ else
+ return IRQ_MASKED_BY_VTPR;
+ } else {
+ return IRQ_MASKED_BY_INSVC;
+ }
+}
+
+void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
+{
+ long spsr;
+ int ret;
+
+ local_irq_save(spsr);
+ ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
+ local_irq_restore(spsr);
+
+ vcpu->arch.irq_new_pending = 1;
+}
+
+void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
+{
+ long spsr;
+ int ret;
+
+ local_irq_save(spsr);
+ ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
+ local_irq_restore(spsr);
+ if (ret) {
+ vcpu->arch.irq_new_pending = 1;
+ wmb();
+ }
+}
+
+void update_vhpi(struct kvm_vcpu *vcpu, int vec)
+{
+ u64 vhpi;
+
+ if (vec == NULL_VECTOR)
+ vhpi = 0;
+ else if (vec == NMI_VECTOR)
+ vhpi = 32;
+ else if (vec == ExtINT_VECTOR)
+ vhpi = 16;
+ else
+ vhpi = vec >> 4;
+
+ VCPU(vcpu, vhpi) = vhpi;
+ if (VCPU(vcpu, vac).a_int)
+ ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
+ (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
+}
+
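+/*
+ * Emulated read of cr.ivr: return the highest-priority pending vector
+ * that is deliverable, moving it from IRR into the in-service set, or
+ * the spurious vector when everything pending is masked.
+ */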
+u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
+{
+ int vec, h_inservice, mask;
+
+ vec = highest_pending_irq(vcpu);
+ h_inservice = highest_inservice_irq(vcpu);
+ mask = irq_masked(vcpu, vec, h_inservice);
+ if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
+ if (VCPU(vcpu, vhpi))
+ update_vhpi(vcpu, NULL_VECTOR);
+ return IA64_SPURIOUS_INT_VECTOR;
+ }
+ if (mask == IRQ_MASKED_BY_VTPR) {
+ update_vhpi(vcpu, vec);
+ return IA64_SPURIOUS_INT_VECTOR;
+ }
+ VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
+ vcpu_unpend_interrupt(vcpu, vec);
+ return (u64)vec;
+}
+
+/**************************************************************************
+ Privileged operation emulation routines
+ **************************************************************************/
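+/*
+ * thash emulation: when pta.vf is set the hash address is obtained
+ * from the PAL_VPS_THASH service; otherwise it is computed directly,
+ * keeping the region bits of vadr and splicing the page-number-derived
+ * offset into the low pta.size bits of the PTA base.
+ */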
+u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ union ia64_pta vpta;
+ union ia64_rr vrr;
+ u64 pval;
+ u64 vhpt_offset;
+
+ vpta.val = vcpu_get_pta(vcpu);
+ vrr.val = vcpu_get_rr(vcpu, vadr);
+ vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
+ if (vpta.vf) {
+ pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
+ vpta.val, 0, 0, 0, 0);
+ } else {
+ pval = (vadr & VRN_MASK) | vhpt_offset |
+ (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
+ }
+ return pval;
+}
+
+u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ union ia64_rr vrr;
+ union ia64_pta vpta;
+ u64 pval;
+
+ vpta.val = vcpu_get_pta(vcpu);
+ vrr.val = vcpu_get_rr(vcpu, vadr);
+ if (vpta.vf) {
+ pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
+ 0, 0, 0, 0, 0);
+ } else
+ pval = 1;
+
+ return pval;
+}
+
+u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
+{
+ struct thash_data *data;
+ union ia64_pta vpta;
+ u64 key;
+
+ vpta.val = vcpu_get_pta(vcpu);
+ if (vpta.vf == 0) {
+ key = 1;
+ return key;
+ }
+ data = vtlb_lookup(vcpu, vadr, D_TLB);
+ if (!data || !data->p)
+ key = 1;
+ else
+ key = data->key;
+
+ return key;
+}
+
+
+
+void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long thash, vadr;
+
+ vadr = vcpu_get_gr(vcpu, inst.M46.r3);
+ thash = vcpu_thash(vcpu, vadr);
+ vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
+}
+
+
+void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long tag, vadr;
+
+ vadr = vcpu_get_gr(vcpu, inst.M46.r3);
+ tag = vcpu_ttag(vcpu, vadr);
+ vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
+}
+
+int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
+{
+ struct thash_data *data;
+ union ia64_isr visr, pt_isr;
+ struct kvm_pt_regs *regs;
+ struct ia64_psr vpsr;
+
+ regs = vcpu_regs(vcpu);
+ pt_isr.val = VMX(vcpu, cr_isr);
+ visr.val = 0;
+ visr.ei = pt_isr.ei;
+ visr.ir = pt_isr.ir;
+ vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+ visr.na = 1;
+
+ data = vhpt_lookup(vadr);
+ if (data) {
+ if (data->p == 0) {
+ vcpu_set_isr(vcpu, visr.val);
+ data_page_not_present(vcpu, vadr);
+ return IA64_FAULT;
+ } else if (data->ma == VA_MATTR_NATPAGE) {
+ vcpu_set_isr(vcpu, visr.val);
+ dnat_page_consumption(vcpu, vadr);
+ return IA64_FAULT;
+ } else {
+ *padr = (data->gpaddr >> data->ps << data->ps) |
+ (vadr & (PSIZE(data->ps) - 1));
+ return IA64_NO_FAULT;
+ }
+ }
+
+ data = vtlb_lookup(vcpu, vadr, D_TLB);
+ if (data) {
+ if (data->p == 0) {
+ vcpu_set_isr(vcpu, visr.val);
+ data_page_not_present(vcpu, vadr);
+ return IA64_FAULT;
+ } else if (data->ma == VA_MATTR_NATPAGE) {
+ vcpu_set_isr(vcpu, visr.val);
+ dnat_page_consumption(vcpu, vadr);
+ return IA64_FAULT;
+ } else{
+ *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
+ | (vadr & (PSIZE(data->ps) - 1));
+ return IA64_NO_FAULT;
+ }
+ }
+ if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
+ if (vpsr.ic) {
+ vcpu_set_isr(vcpu, visr.val);
+ alt_dtlb(vcpu, vadr);
+ return IA64_FAULT;
+ } else {
+ nested_dtlb(vcpu);
+ return IA64_FAULT;
+ }
+ } else {
+ if (vpsr.ic) {
+ vcpu_set_isr(vcpu, visr.val);
+ dvhpt_fault(vcpu, vadr);
+ return IA64_FAULT;
+ } else{
+ nested_dtlb(vcpu);
+ return IA64_FAULT;
+ }
+ }
+
+ return IA64_NO_FAULT;
+}
+
+
+int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r1, r3;
+
+ r3 = vcpu_get_gr(vcpu, inst.M46.r3);
+
+ if (vcpu_tpa(vcpu, r3, &r1))
+ return IA64_FAULT;
+
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+ return(IA64_NO_FAULT);
+}
+
+void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r1, r3;
+
+ r3 = vcpu_get_gr(vcpu, inst.M46.r3);
+ r1 = vcpu_tak(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+}
+
+
+/************************************
+ * Insert/Purge translation register/cache
+ ************************************/
+void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
+{
+ thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
+}
+
+void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
+{
+ thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
+}
+
+void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
+{
+ u64 ps, va, rid;
+ struct thash_data *p_itr;
+
+ ps = itir_ps(itir);
+ va = PAGEALIGN(ifa, ps);
+ pte &= ~PAGE_FLAGS_RV_MASK;
+ rid = vcpu_get_rr(vcpu, ifa);
+ rid = rid & RR_RID_MASK;
+ p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
+ vcpu_set_tr(p_itr, pte, itir, va, rid);
+ vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
+}
+
+
+void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
+{
+ u64 gpfn;
+ u64 ps, va, rid;
+ struct thash_data *p_dtr;
+
+ ps = itir_ps(itir);
+ va = PAGEALIGN(ifa, ps);
+ pte &= ~PAGE_FLAGS_RV_MASK;
+
+ if (ps != _PAGE_SIZE_16M)
+ thash_purge_entries(vcpu, va, ps);
+ gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+ if (__gpfn_is_io(gpfn))
+ pte |= VTLB_PTE_IO;
+ rid = vcpu_get_rr(vcpu, va);
+ rid = rid & RR_RID_MASK;
+ p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
+ vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
+ pte, itir, va, rid);
+ vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
+}
+
+void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
+{
+ int index;
+ u64 va;
+
+ va = PAGEALIGN(ifa, ps);
+ while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
+ vcpu->arch.dtrs[index].page_flags = 0;
+
+ thash_purge_entries(vcpu, va, ps);
+}
+
+void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
+{
+ int index;
+ u64 va;
+
+ va = PAGEALIGN(ifa, ps);
+ while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
+ vcpu->arch.itrs[index].page_flags = 0;
+
+ thash_purge_entries(vcpu, va, ps);
+}
+
+void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
+{
+ va = PAGEALIGN(va, ps);
+ thash_purge_entries(vcpu, va, ps);
+}
+
+void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
+{
+ thash_purge_all(vcpu);
+}
+
+void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
+{
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+ long psr;
+ local_irq_save(psr);
+ p->exit_reason = EXIT_REASON_PTC_G;
+
+ p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
+ p->u.ptc_g_data.vaddr = va;
+ p->u.ptc_g_data.ps = ps;
+ vmm_transition(vcpu);
+ /* Do local purge here */
+ vcpu_ptc_l(vcpu, va, ps);
+ local_irq_restore(psr);
+}
+
+
+void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
+{
+ vcpu_ptc_ga(vcpu, va, ps);
+}
+
+void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ vcpu_ptc_e(vcpu, ifa);
+}
+
+void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa, itir;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ itir = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa, itir;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ itir = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa, itir;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ itir = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa, itir;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ itir = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long ifa, itir;
+
+ ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+ itir = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long itir, ifa, pte, slot;
+
+ slot = vcpu_get_gr(vcpu, inst.M45.r3);
+ pte = vcpu_get_gr(vcpu, inst.M45.r2);
+ itir = vcpu_get_itir(vcpu);
+ ifa = vcpu_get_ifa(vcpu);
+ vcpu_itr_d(vcpu, slot, pte, itir, ifa);
+}
+
+
+
+void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long itir, ifa, pte, slot;
+
+ slot = vcpu_get_gr(vcpu, inst.M45.r3);
+ pte = vcpu_get_gr(vcpu, inst.M45.r2);
+ itir = vcpu_get_itir(vcpu);
+ ifa = vcpu_get_ifa(vcpu);
+ vcpu_itr_i(vcpu, slot, pte, itir, ifa);
+}
+
+void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long itir, ifa, pte;
+
+ itir = vcpu_get_itir(vcpu);
+ ifa = vcpu_get_ifa(vcpu);
+ pte = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_itc_d(vcpu, pte, itir, ifa);
+}
+
+void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long itir, ifa, pte;
+
+ itir = vcpu_get_itir(vcpu);
+ ifa = vcpu_get_ifa(vcpu);
+ pte = vcpu_get_gr(vcpu, inst.M45.r2);
+ vcpu_itc_i(vcpu, pte, itir, ifa);
+}
+
+/*************************************
+ * Moves to semi-privileged registers
+ *************************************/
+
+void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long imm;
+
+ if (inst.M30.s)
+ imm = -inst.M30.imm;
+ else
+ imm = inst.M30.imm;
+
+ vcpu_set_itc(vcpu, imm);
+}
+
+void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r2;
+
+ r2 = vcpu_get_gr(vcpu, inst.M29.r2);
+ vcpu_set_itc(vcpu, r2);
+}
+
+
+void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r1;
+
+ r1 = vcpu_get_itc(vcpu);
+ vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
+}
+/**************************************************************************
+ VCPU protection key register access routines
+ **************************************************************************/
+
+unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
+{
+ return ((unsigned long)ia64_get_pkr(reg));
+}
+
+void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
+{
+ ia64_set_pkr(reg, val);
+}
+
+
+unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
+{
+ union ia64_rr rr, rr1;
+
+ rr.val = vcpu_get_rr(vcpu, ifa);
+ rr1.val = 0;
+ rr1.ps = rr.ps;
+ rr1.rid = rr.rid;
+ return (rr1.val);
+}
+
+
+
+/********************************
+ * Moves to privileged registers
+ ********************************/
+unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
+ unsigned long val)
+{
+ union ia64_rr oldrr, newrr;
+ unsigned long rrval;
+ struct exit_ctl_data *p = &vcpu->arch.exit_data;
+ unsigned long psr;
+
+ oldrr.val = vcpu_get_rr(vcpu, reg);
+ newrr.val = val;
+ vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
+
+ switch ((unsigned long)(reg >> VRN_SHIFT)) {
+ case VRN6:
+ vcpu->arch.vmm_rr = vrrtomrr(val);
+ local_irq_save(psr);
+ p->exit_reason = EXIT_REASON_SWITCH_RR6;
+ vmm_transition(vcpu);
+ local_irq_restore(psr);
+ break;
+ case VRN4:
+ rrval = vrrtomrr(val);
+ vcpu->arch.metaphysical_saved_rr4 = rrval;
+ if (!is_physical_mode(vcpu))
+ ia64_set_rr(reg, rrval);
+ break;
+ case VRN0:
+ rrval = vrrtomrr(val);
+ vcpu->arch.metaphysical_saved_rr0 = rrval;
+ if (!is_physical_mode(vcpu))
+ ia64_set_rr(reg, rrval);
+ break;
+ default:
+ ia64_set_rr(reg, vrrtomrr(val));
+ break;
+ }
+
+ return (IA64_NO_FAULT);
+}
+
+
+
+void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r2;
+
+ r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+ r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+ vcpu_set_rr(vcpu, r3, r2);
+}
+
+void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+}
+
+void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+}
+
+void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r2;
+
+ r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+ r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+ vcpu_set_pmc(vcpu, r3, r2);
+}
+
+void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r2;
+
+ r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+ r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+ vcpu_set_pmd(vcpu, r3, r2);
+}
+
+void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ u64 r3, r2;
+
+ r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+ r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+ vcpu_set_pkr(vcpu, r3, r2);
+}
+
+
+
+void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_rr(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_pkr(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_dbr(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_ibr(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_pmc(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+
+unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
+{
+ /* FIXME: This could get called as a result of a rsvd-reg fault */
+ if (reg > (ia64_get_cpuid(3) & 0xff))
+ return 0;
+ else
+ return ia64_get_cpuid(reg);
+}
+
+void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r3, r1;
+
+ r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+ r1 = vcpu_get_cpuid(vcpu, r3);
+ vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ VCPU(vcpu, tpr) = val;
+ vcpu->arch.irq_check = 1;
+}
+
+unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long r2;
+
+ r2 = vcpu_get_gr(vcpu, inst.M32.r2);
+ VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
+
+ switch (inst.M32.cr3) {
+ case 0:
+ vcpu_set_dcr(vcpu, r2);
+ break;
+ case 1:
+ vcpu_set_itm(vcpu, r2);
+ break;
+ case 66:
+ vcpu_set_tpr(vcpu, r2);
+ break;
+ case 67:
+ vcpu_set_eoi(vcpu, r2);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+
+unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long tgt = inst.M33.r1;
+ unsigned long val;
+
+ switch (inst.M33.cr3) {
+ case 65:
+ val = vcpu_get_ivr(vcpu);
+ vcpu_set_gr(vcpu, tgt, val, 0);
+ break;
+
+ case 67:
+ vcpu_set_gr(vcpu, tgt, 0L, 0);
+ break;
+ default:
+ val = VCPU(vcpu, vcr[inst.M33.cr3]);
+ vcpu_set_gr(vcpu, tgt, val, 0);
+ break;
+ }
+
+ return 0;
+}
+
+
+
+void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
+{
+
+ unsigned long mask;
+ struct kvm_pt_regs *regs;
+ struct ia64_psr old_psr, new_psr;
+
+ old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+
+ regs = vcpu_regs(vcpu);
+ /* We only support a guest with:
+ * vpsr.pk = 0
+ * vpsr.is = 0
+ * Otherwise, panic the VM.
+ */
+ if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
+ panic_vm(vcpu);
+
+ /*
+ * The IA64_PSR bits id/da/dd/ss/ed/ia become 0 after each
+ * instruction completes successfully, so they are folded into
+ * mIA64_PSR rather than tracked in the virtual PSR.
+ */
+ VCPU(vcpu, vpsr) = val
+ & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
+ IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
+
+ if (!old_psr.i && (val & IA64_PSR_I)) {
+ /* vpsr.i 0->1 */
+ vcpu->arch.irq_check = 1;
+ }
+ new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+
+ /*
+ * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
+ * except for the following bits:
+ * ic/i/dt/si/rt/mc/it/bn/vm
+ */
+ mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
+ IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
+ IA64_PSR_VM;
+
+ regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
+
+ check_mm_mode_switch(vcpu, old_psr, new_psr);
+
+ return ;
+}
+
+unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
+{
+ struct ia64_psr vpsr;
+
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+
+ if (!vpsr.ic)
+ VCPU(vcpu, ifs) = regs->cr_ifs;
+ regs->cr_ifs = IA64_IFS_V;
+ return (IA64_NO_FAULT);
+}
+
+
+
+/**************************************************************************
+ VCPU banked general register access routines
+ **************************************************************************/
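+/*
+ * bsw.0/bsw.1 emulation: r16-r31 of the interrupted context are
+ * exchanged with the virtual bank arrays, and the helper macros below
+ * swap the matching 16 NaT bits between the machine unat word and the
+ * virtual bank NaT words.
+ */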
+#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
+ do { \
+ __asm__ __volatile__ ( \
+ ";;extr.u %0 = %3,%6,16;;\n" \
+ "dep %1 = %0, %1, 0, 16;;\n" \
+ "st8 [%4] = %1\n" \
+ "extr.u %0 = %2, 16, 16;;\n" \
+ "dep %3 = %0, %3, %6, 16;;\n" \
+ "st8 [%5] = %3\n" \
+ ::"r"(i), "r"(*b1unat), "r"(*b0unat), \
+ "r"(*runat), "r"(b1unat), "r"(runat), \
+ "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
+ } while (0)
+
+void vcpu_bsw0(struct kvm_vcpu *vcpu)
+{
+ unsigned long i;
+
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ unsigned long *r = &regs->r16;
+ unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
+ unsigned long *b1 = &VCPU(vcpu, vgr[0]);
+ unsigned long *runat = &regs->eml_unat;
+ unsigned long *b0unat = &VCPU(vcpu, vbnat);
+ unsigned long *b1unat = &VCPU(vcpu, vnat);
+
+
+ if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
+ for (i = 0; i < 16; i++) {
+ *b1++ = *r;
+ *r++ = *b0++;
+ }
+ vcpu_bsw0_unat(i, b0unat, b1unat, runat,
+ VMM_PT_REGS_R16_SLOT);
+ VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
+ }
+}
+
+#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
+ do { \
+ __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
+ "dep %1 = %0, %1, 16, 16;;\n" \
+ "st8 [%4] = %1\n" \
+ "extr.u %0 = %2, 0, 16;;\n" \
+ "dep %3 = %0, %3, %6, 16;;\n" \
+ "st8 [%5] = %3\n" \
+ ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
+ "r"(*runat), "r"(b0unat), "r"(runat), \
+ "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
+ } while (0)
+
+void vcpu_bsw1(struct kvm_vcpu *vcpu)
+{
+ unsigned long i;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ unsigned long *r = &regs->r16;
+ unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
+ unsigned long *b1 = &VCPU(vcpu, vgr[0]);
+ unsigned long *runat = &regs->eml_unat;
+ unsigned long *b0unat = &VCPU(vcpu, vbnat);
+ unsigned long *b1unat = &VCPU(vcpu, vnat);
+
+ if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
+ for (i = 0; i < 16; i++) {
+ *b0++ = *r;
+ *r++ = *b1++;
+ }
+ vcpu_bsw1_unat(i, b0unat, b1unat, runat,
+ VMM_PT_REGS_R16_SLOT);
+ VCPU(vcpu, vpsr) |= IA64_PSR_BN;
+ }
+}
+
+
+
+
+void vcpu_rfi(struct kvm_vcpu *vcpu)
+{
+ unsigned long ifs, psr;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ psr = VCPU(vcpu, ipsr);
+ if (psr & IA64_PSR_BN)
+ vcpu_bsw1(vcpu);
+ else
+ vcpu_bsw0(vcpu);
+ vcpu_set_psr(vcpu, psr);
+ ifs = VCPU(vcpu, ifs);
+ if (ifs >> 63)
+ regs->cr_ifs = ifs;
+ regs->cr_iip = VCPU(vcpu, iip);
+}
+
+
+/*
+ * VPSR cannot track the bits of the guest PSR listed in 'mask' below;
+ * this function reconstructs the full guest PSR from vpsr and cr.ipsr.
+ */
+
+unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
+{
+ unsigned long mask;
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+ mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
+ IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
+ return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
+}
+
+void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long vpsr;
+ unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
+ | inst.M44.imm;
+
+ vpsr = vcpu_get_psr(vcpu);
+ vpsr &= (~imm24);
+ vcpu_set_psr(vcpu, vpsr);
+}
+
+void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long vpsr;
+ unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
+ | inst.M44.imm;
+
+ vpsr = vcpu_get_psr(vcpu);
+ vpsr |= imm24;
+ vcpu_set_psr(vcpu, vpsr);
+}
+
+/* Generate Mask
+ * Parameter:
+ * bit -- starting bit
+ * len -- how many bits
+ */
+#define MASK(bit,len) \
+({ \
+ __u64 ret; \
+ \
+ __asm __volatile("dep %0=-1, r0, %1, %2"\
+ : "=r" (ret): \
+ "M" (bit), \
+ "M" (len)); \
+ ret; \
+})
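+
+/*
+ * For example, MASK(0, 32) deposits 32 one-bits at position 0, giving
+ * 0x00000000ffffffff, and MASK(32, 32) gives 0xffffffff00000000;
+ * vcpu_set_psr_l below uses this pair to splice the caller's low
+ * 32 bits onto the current upper half of the PSR.
+ */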
+
+void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
+ vcpu_set_psr(vcpu, val);
+}
+
+void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long val;
+
+ val = vcpu_get_gr(vcpu, inst.M35.r2);
+ vcpu_set_psr_l(vcpu, val);
+}
+
+void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+ unsigned long val;
+
+ val = vcpu_get_psr(vcpu);
+ val = (val & MASK(0, 32)) | (val & MASK(35, 2));
+ vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
+}
+
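+/*
+ * An IA-64 bundle is 16 bytes and holds three instruction slots;
+ * ipsr.ri selects the slot within the bundle, so stepping past slot 2
+ * wraps ri to 0 and advances iip to the next bundle.
+ */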
+void vcpu_increment_iip(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
+ if (ipsr->ri == 2) {
+ ipsr->ri = 0;
+ regs->cr_iip += 16;
+ } else
+ ipsr->ri++;
+}
+
+void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+ struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
+
+ if (ipsr->ri == 0) {
+ ipsr->ri = 2;
+ regs->cr_iip -= 16;
+ } else
+ ipsr->ri--;
+}
+
+/** Emulate a privileged operation.
+ *
+ *
+ * @param vcpu virtual cpu
+ * @cause the reason for the virtualization fault
+ * @opcode the instruction code which caused the virtualization fault
+ */
+
+void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
+{
+ unsigned long status, cause, opcode ;
+ INST64 inst;
+
+ status = IA64_NO_FAULT;
+ cause = VMX(vcpu, cause);
+ opcode = VMX(vcpu, opcode);
+ inst.inst = opcode;
+ /*
+ * Switch to actual virtual rid in rr0 and rr4,
+ * which is required by some tlb related instructions.
+ */
+ prepare_if_physical_mode(vcpu);
+
+ switch (cause) {
+ case EVENT_RSM:
+ kvm_rsm(vcpu, inst);
+ break;
+ case EVENT_SSM:
+ kvm_ssm(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_PSR:
+ kvm_mov_to_psr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_PSR:
+ kvm_mov_from_psr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_CR:
+ kvm_mov_from_cr(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_CR:
+ kvm_mov_to_cr(vcpu, inst);
+ break;
+ case EVENT_BSW_0:
+ vcpu_bsw0(vcpu);
+ break;
+ case EVENT_BSW_1:
+ vcpu_bsw1(vcpu);
+ break;
+ case EVENT_COVER:
+ vcpu_cover(vcpu);
+ break;
+ case EVENT_RFI:
+ vcpu_rfi(vcpu);
+ break;
+ case EVENT_ITR_D:
+ kvm_itr_d(vcpu, inst);
+ break;
+ case EVENT_ITR_I:
+ kvm_itr_i(vcpu, inst);
+ break;
+ case EVENT_PTR_D:
+ kvm_ptr_d(vcpu, inst);
+ break;
+ case EVENT_PTR_I:
+ kvm_ptr_i(vcpu, inst);
+ break;
+ case EVENT_ITC_D:
+ kvm_itc_d(vcpu, inst);
+ break;
+ case EVENT_ITC_I:
+ kvm_itc_i(vcpu, inst);
+ break;
+ case EVENT_PTC_L:
+ kvm_ptc_l(vcpu, inst);
+ break;
+ case EVENT_PTC_G:
+ kvm_ptc_g(vcpu, inst);
+ break;
+ case EVENT_PTC_GA:
+ kvm_ptc_ga(vcpu, inst);
+ break;
+ case EVENT_PTC_E:
+ kvm_ptc_e(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_RR:
+ kvm_mov_to_rr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_RR:
+ kvm_mov_from_rr(vcpu, inst);
+ break;
+ case EVENT_THASH:
+ kvm_thash(vcpu, inst);
+ break;
+ case EVENT_TTAG:
+ kvm_ttag(vcpu, inst);
+ break;
+ case EVENT_TPA:
+ status = kvm_tpa(vcpu, inst);
+ break;
+ case EVENT_TAK:
+ kvm_tak(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_AR_IMM:
+ kvm_mov_to_ar_imm(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_AR:
+ kvm_mov_to_ar_reg(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_AR:
+ kvm_mov_from_ar_reg(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_DBR:
+ kvm_mov_to_dbr(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_IBR:
+ kvm_mov_to_ibr(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_PMC:
+ kvm_mov_to_pmc(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_PMD:
+ kvm_mov_to_pmd(vcpu, inst);
+ break;
+ case EVENT_MOV_TO_PKR:
+ kvm_mov_to_pkr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_DBR:
+ kvm_mov_from_dbr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_IBR:
+ kvm_mov_from_ibr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_PMC:
+ kvm_mov_from_pmc(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_PKR:
+ kvm_mov_from_pkr(vcpu, inst);
+ break;
+ case EVENT_MOV_FROM_CPUID:
+ kvm_mov_from_cpuid(vcpu, inst);
+ break;
+ case EVENT_VMSW:
+ status = IA64_FAULT;
+ break;
+ default:
+ break;
+ };
+ /* Assume all statuses are IA64_NO_FAULT? */
+ if (status == IA64_NO_FAULT && cause != EVENT_RFI)
+ vcpu_increment_iip(vcpu);
+
+ recover_if_physical_mode(vcpu);
+}
+
+void init_vcpu(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ vcpu->arch.mode_flags = GUEST_IN_PHY;
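+ /* rr value 0x38: ve = 0, ps = 14 (16KB pages), rid = 0 */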
+ VMX(vcpu, vrr[0]) = 0x38;
+ VMX(vcpu, vrr[1]) = 0x38;
+ VMX(vcpu, vrr[2]) = 0x38;
+ VMX(vcpu, vrr[3]) = 0x38;
+ VMX(vcpu, vrr[4]) = 0x38;
+ VMX(vcpu, vrr[5]) = 0x38;
+ VMX(vcpu, vrr[6]) = 0x38;
+ VMX(vcpu, vrr[7]) = 0x38;
+ VCPU(vcpu, vpsr) = IA64_PSR_BN;
+ VCPU(vcpu, dcr) = 0;
+ /* pta.size must not be 0. The minimum is 15 (32k) */
+ VCPU(vcpu, pta) = 15 << 2;
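+ /* 0x10000 sets bit 16, the mask bit (cf. ITV_IRQ_MASK), so itv
+ * and the pmv/cmcv/lrr vectors below start out masked */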
+ VCPU(vcpu, itv) = 0x10000;
+ VCPU(vcpu, itm) = 0;
+ VMX(vcpu, last_itc) = 0;
+
+ VCPU(vcpu, lid) = VCPU_LID(vcpu);
+ VCPU(vcpu, ivr) = 0;
+ VCPU(vcpu, tpr) = 0x10000;
+ VCPU(vcpu, eoi) = 0;
+ VCPU(vcpu, irr[0]) = 0;
+ VCPU(vcpu, irr[1]) = 0;
+ VCPU(vcpu, irr[2]) = 0;
+ VCPU(vcpu, irr[3]) = 0;
+ VCPU(vcpu, pmv) = 0x10000;
+ VCPU(vcpu, cmcv) = 0x10000;
+ VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */
+ VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */
+ update_vhpi(vcpu, NULL_VECTOR);
+ VLSAPIC_XTP(vcpu) = 0x80; /* disabled */
+
+ for (i = 0; i < 4; i++)
+ VLSAPIC_INSVC(vcpu, i) = 0;
+}
+
+void kvm_init_all_rr(struct kvm_vcpu *vcpu)
+{
+ unsigned long psr;
+
+ local_irq_save(psr);
+
+ /* WARNING: virtual mode and physical mode must not coexist
+ * in the same region
+ */
+
+ vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
+ vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
+
+ if (is_physical_mode(vcpu)) {
+ if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
+ panic_vm(vcpu);
+
+ ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
+ ia64_dv_serialize_data();
+ } else {
+ ia64_set_rr((VRN0 << VRN_SHIFT),
+ vcpu->arch.metaphysical_saved_rr0);
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN4 << VRN_SHIFT),
+ vcpu->arch.metaphysical_saved_rr4);
+ ia64_dv_serialize_data();
+ }
+ ia64_set_rr((VRN1 << VRN_SHIFT),
+ vrrtomrr(VMX(vcpu, vrr[VRN1])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN2 << VRN_SHIFT),
+ vrrtomrr(VMX(vcpu, vrr[VRN2])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN3 << VRN_SHIFT),
+ vrrtomrr(VMX(vcpu, vrr[VRN3])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN5 << VRN_SHIFT),
+ vrrtomrr(VMX(vcpu, vrr[VRN5])));
+ ia64_dv_serialize_data();
+ ia64_set_rr((VRN7 << VRN_SHIFT),
+ vrrtomrr(VMX(vcpu, vrr[VRN7])));
+ ia64_dv_serialize_data();
+ ia64_srlz_d();
+ ia64_set_psr(psr);
+}
+
+int vmm_entry(void)
+{
+ struct kvm_vcpu *v;
+ v = current_vcpu;
+
+ ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
+ 0, 0, 0, 0, 0, 0);
+ kvm_init_vtlb(v);
+ kvm_init_vhpt(v);
+ init_vcpu(v);
+ kvm_init_all_rr(v);
+ vmm_reset_entry();
+
+ return 0;
+}
+
+void panic_vm(struct kvm_vcpu *v)
+{
+ struct exit_ctl_data *p = &v->arch.exit_data;
+
+ p->exit_reason = EXIT_REASON_VM_PANIC;
+ vmm_transition(v);
+ /* Never returns */
+ while (1);
+}
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
new file mode 100644
index 00000000000..b0fcfb62c49
--- /dev/null
+++ b/arch/ia64/kvm/vcpu.h
@@ -0,0 +1,740 @@
+/*
+ * vcpu.h: vcpu routines
+ * Copyright (c) 2005, Intel Corporation.
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+
+#ifndef __KVM_VCPU_H__
+#define __KVM_VCPU_H__
+
+#include <asm/types.h>
+#include <asm/fpu.h>
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
+#include "vti.h"
+
+#include <linux/kvm_host.h>
+#include <linux/spinlock.h>
+
+typedef unsigned long IA64_INST;
+
+typedef union U_IA64_BUNDLE {
+ unsigned long i64[2];
+ struct { unsigned long template:5, slot0:41, slot1a:18,
+ slot1b:23, slot2:41; };
+ /* NOTE: following doesn't work because bitfields can't cross natural
+ size boundaries
+ struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
+} IA64_BUNDLE;
+
+typedef union U_INST64_A5 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5,
+ imm9d:9, s:1, major:4; };
+} INST64_A5;
+
+typedef union U_INST64_B4 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6,
+ wh:2, d:1, un1:1, major:4; };
+} INST64_B4;
+
+typedef union U_INST64_B8 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
+} INST64_B8;
+
+typedef union U_INST64_B9 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
+} INST64_B9;
+
+typedef union U_INST64_I19 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
+} INST64_I19;
+
+typedef union U_INST64_I26 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_I26;
+
+typedef union U_INST64_I27 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
+} INST64_I27;
+
+typedef union U_INST64_I28 { /* not privileged (mov from AR) */
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_I28;
+
+typedef union U_INST64_M28 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M28;
+
+typedef union U_INST64_M29 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M29;
+
+typedef union U_INST64_M30 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2,
+ x3:3, s:1, major:4; };
+} INST64_M30;
+
+typedef union U_INST64_M31 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M31;
+
+typedef union U_INST64_M32 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M32;
+
+typedef union U_INST64_M33 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M33;
+
+typedef union U_INST64_M35 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
+
+} INST64_M35;
+
+typedef union U_INST64_M36 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
+} INST64_M36;
+
+typedef union U_INST64_M37 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3,
+ i:1, major:4; };
+} INST64_M37;
+
+typedef union U_INST64_M41 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
+} INST64_M41;
+
+typedef union U_INST64_M42 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M42;
+
+typedef union U_INST64_M43 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M43;
+
+typedef union U_INST64_M44 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
+} INST64_M44;
+
+typedef union U_INST64_M45 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M45;
+
+typedef union U_INST64_M46 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6,
+ x3:3, un1:1, major:4; };
+} INST64_M46;
+
+typedef union U_INST64_M47 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
+} INST64_M47;
+
+typedef union U_INST64_M1{
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M1;
+
+typedef union U_INST64_M2{
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M2;
+
+typedef union U_INST64_M3{
+ IA64_INST inst;
+ struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2,
+ x6:6, s:1, major:4; };
+} INST64_M3;
+
+typedef union U_INST64_M4 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M4;
+
+typedef union U_INST64_M5 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2,
+ x6:6, s:1, major:4; };
+} INST64_M5;
+
+typedef union U_INST64_M6 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M6;
+
+typedef union U_INST64_M9 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M9;
+
+typedef union U_INST64_M10 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2,
+ x6:6, s:1, major:4; };
+} INST64_M10;
+
+typedef union U_INST64_M12 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2,
+ x6:6, m:1, major:4; };
+} INST64_M12;
+
+typedef union U_INST64_M15 {
+ IA64_INST inst;
+ struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2,
+ x6:6, s:1, major:4; };
+} INST64_M15;
+
+typedef union U_INST64 {
+ IA64_INST inst;
+ struct { unsigned long :37, major:4; } generic;
+ INST64_A5 A5; /* used in build_hypercall_bundle only */
+ INST64_B4 B4; /* used in build_hypercall_bundle only */
+ INST64_B8 B8; /* rfi, bsw.[01] */
+ INST64_B9 B9; /* break.b */
+ INST64_I19 I19; /* used in build_hypercall_bundle only */
+ INST64_I26 I26; /* mov register to ar (I unit) */
+ INST64_I27 I27; /* mov immediate to ar (I unit) */
+ INST64_I28 I28; /* mov from ar (I unit) */
+ INST64_M1 M1; /* ld integer */
+ INST64_M2 M2;
+ INST64_M3 M3;
+ INST64_M4 M4; /* st integer */
+ INST64_M5 M5;
+ INST64_M6 M6; /* ldfd floating point */
+ INST64_M9 M9; /* stfd floating point */
+ INST64_M10 M10; /* stfd floating point */
+ INST64_M12 M12; /* ldfd pair floating point */
+ INST64_M15 M15; /* lfetch + imm update */
+ INST64_M28 M28; /* purge translation cache entry */
+ INST64_M29 M29; /* mov register to ar (M unit) */
+ INST64_M30 M30; /* mov immediate to ar (M unit) */
+ INST64_M31 M31; /* mov from ar (M unit) */
+ INST64_M32 M32; /* mov reg to cr */
+ INST64_M33 M33; /* mov from cr */
+ INST64_M35 M35; /* mov to psr */
+ INST64_M36 M36; /* mov from psr */
+ INST64_M37 M37; /* break.m */
+ INST64_M41 M41; /* translation cache insert */
+ INST64_M42 M42; /* mov to indirect reg/translation reg insert*/
+ INST64_M43 M43; /* mov from indirect reg */
+ INST64_M44 M44; /* set/reset system mask */
+ INST64_M45 M45; /* translation purge */
+ INST64_M46 M46; /* translation access (tpa,tak) */
+ INST64_M47 M47; /* purge translation entry */
+} INST64;
+
+#define MASK_41 ((unsigned long)0x1ffffffffff)
+
+/* Virtual address memory attributes encoding */
+#define VA_MATTR_WB 0x0
+#define VA_MATTR_UC 0x4
+#define VA_MATTR_UCE 0x5
+#define VA_MATTR_WC 0x6
+#define VA_MATTR_NATPAGE 0x7
+
+#define PMASK(size) (~((size) - 1))
+#define PSIZE(size) (1UL<<(size))
+#define CLEARLSB(ppn, nbits) (((ppn) >> (nbits)) << (nbits))
+#define PAGEALIGN(va, ps) CLEARLSB(va, ps)
+#define PAGE_FLAGS_RV_MASK (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53))
+#define _PAGE_MA_ST (0x1 << 2) /* is reserved for software use */
+
+#define ARCH_PAGE_SHIFT 12
+
+#define INVALID_TI_TAG (1UL << 63)
+
+#define VTLB_PTE_P_BIT 0
+#define VTLB_PTE_IO_BIT 60
+#define VTLB_PTE_IO (1UL<<VTLB_PTE_IO_BIT)
+#define VTLB_PTE_P (1UL<<VTLB_PTE_P_BIT)
+
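+/*
+ * itr_regions/dtr_regions is a small bitmap with one bit per virtual
+ * region (va bits 63:61), letting the TR overlap search skip regions
+ * that hold no translation registers.
+ */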
+#define vcpu_quick_region_check(_tr_regions,_ifa) \
+ (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
+
+#define vcpu_quick_region_set(_tr_regions,_ifa) \
+ do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
+
+static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
+ u64 va, u64 rid)
+{
+ trp->page_flags = pte;
+ trp->itir = itir;
+ trp->vadr = va;
+ trp->rid = rid;
+}
+
+extern u64 kvm_lookup_mpa(u64 gpfn);
+extern u64 kvm_gpa_to_mpa(u64 gpa);
+
+/* Return the I/O type if the gpfn is an I/O page */
+#define __gpfn_is_io(gpfn) \
+ ({ \
+ u64 pte, ret = 0; \
+ pte = kvm_lookup_mpa(gpfn); \
+ if (!(pte & GPFN_INV_MASK)) \
+ ret = pte & GPFN_IO_MASK; \
+ ret; \
+ })
+
+#endif
+
+#define IA64_NO_FAULT 0
+#define IA64_FAULT 1
+
+#define VMM_RBS_OFFSET ((VMM_TASK_SIZE + 15) & ~15)
+
+#define SW_BAD 0 /* Bad mode transition */
+#define SW_V2P 1 /* Physical mode emulation is activated */
+#define SW_P2V 2 /* Exit physical mode emulation */
+#define SW_SELF 3 /* No mode transition */
+#define SW_NOP 4 /* Mode transition, but without action required */
+
+#define GUEST_IN_PHY 0x1
+#define GUEST_PHY_EMUL 0x2
+
+#define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP))
+
+#define VRN_SHIFT 61
+#define VRN_MASK 0xe000000000000000
+#define VRN0 0x0UL
+#define VRN1 0x1UL
+#define VRN2 0x2UL
+#define VRN3 0x3UL
+#define VRN4 0x4UL
+#define VRN5 0x5UL
+#define VRN6 0x6UL
+#define VRN7 0x7UL
+
+#define IRQ_NO_MASKED 0
+#define IRQ_MASKED_BY_VTPR 1
+#define IRQ_MASKED_BY_INSVC 2 /* masked by inservice IRQ */
+
+#define PTA_BASE_SHIFT 15
+
+#define IA64_PSR_VM_BIT 46
+#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
+
+/* Interruption Function State */
+#define IA64_IFS_V_BIT 63
+#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT)
+
+#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
+#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/gcc_intrin.h>
+
+#define is_physical_mode(v) \
+ ((v->arch.mode_flags) & GUEST_IN_PHY)
+
+#define is_virtual_mode(v) \
+ (!is_physical_mode(v))
+
+#define MODE_IND(psr) \
+ (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
+
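+/*
+ * VMM spinlock: acquire with cmpxchg4.acq (an old value of 0 means the
+ * lock was taken); on contention, spin with plain reads until the lock
+ * looks free, then retry the atomic - a test-and-test-and-set loop.
+ */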
+#define _vmm_raw_spin_lock(x) \
+ do { \
+ __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
+ __u64 ia64_spinlock_val; \
+ ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
+ if (unlikely(ia64_spinlock_val)) { \
+ do { \
+ while (*ia64_spinlock_ptr) \
+ ia64_barrier(); \
+ ia64_spinlock_val = \
+ ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
+ } while (ia64_spinlock_val); \
+ } \
+ } while (0)
+
+#define _vmm_raw_spin_unlock(x) \
+ do { barrier(); \
+ ((spinlock_t *)x)->raw_lock.lock = 0; } \
+while (0)
+
+void vmm_spin_lock(spinlock_t *lock);
+void vmm_spin_unlock(spinlock_t *lock);
+enum {
+ I_TLB = 1,
+ D_TLB = 2
+};
+
+union kvm_va {
+ struct {
+ unsigned long off : 60; /* intra-region offset */
+ unsigned long reg : 4; /* region number */
+ } f;
+ unsigned long l;
+ void *p;
+};
+
+#define __kvm_pa(x) ({union kvm_va _v; _v.l = (long) (x); \
+ _v.f.reg = 0; _v.l; })
+#define __kvm_va(x) ({union kvm_va _v; _v.l = (long) (x); \
+ _v.f.reg = -1; _v.p; })
+
+#define _REGION_ID(x) ({union ia64_rr _v; _v.val = (long)(x); \
+ _v.rid; })
+#define _REGION_PAGE_SIZE(x) ({union ia64_rr _v; _v.val = (long)(x); \
+ _v.ps; })
+#define _REGION_HW_WALKER(x) ({union ia64_rr _v; _v.val = (long)(x); \
+ _v.ve; })
+
+enum vhpt_ref{ DATA_REF, NA_REF, INST_REF, RSE_REF };
+enum tlb_miss_type { INSTRUCTION, DATA, REGISTER };
+
+#define VCPU(_v, _x) ((_v)->arch.vpd->_x)
+#define VMX(_v, _x) ((_v)->arch._x)
+
+#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
+#define VLSAPIC_XTP(_v) VMX(_v, xtp)
+
+static inline unsigned long itir_ps(unsigned long itir)
+{
+ return ((itir >> 2) & 0x3f);
+}
+
+
+/**************************************************************************
+ VCPU control register access routines
+ **************************************************************************/
+
+static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, itir));
+}
+
+static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, itir) = val;
+}
+
+static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, ifa));
+}
+
+static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, ifa) = val;
+}
+
+static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, iva));
+}
+
+static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, pta));
+}
+
+static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, lid));
+}
+
+static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, tpr));
+}
+
+static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu)
+{
+ return (0UL); /*reads of eoi always return 0 */
+}
+
+static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, irr[0]));
+}
+
+static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, irr[1]));
+}
+
+static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, irr[2]));
+}
+
+static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu)
+{
+ return ((u64)VCPU(vcpu, irr[3]));
+}
+
+static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val)
+{
+ ia64_setreg(_IA64_REG_CR_DCR, val);
+}
+
+static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, isr) = val;
+}
+
+static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, lid) = val;
+}
+
+static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, ipsr) = val;
+}
+
+static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, iip) = val;
+}
+
+static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, ifs) = val;
+}
+
+static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, iipa) = val;
+}
+
+static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val)
+{
+ VCPU(vcpu, iha) = val;
+}
+
+
+static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg)
+{
+ return vcpu->arch.vrr[reg>>61];
+}
+
+/**************************************************************************
+ VCPU debug breakpoint register access routines
+ **************************************************************************/
+
+static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+ __ia64_set_dbr(reg, val);
+}
+
+static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+ ia64_set_ibr(reg, val);
+}
+
+static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg)
+{
+ return ((u64)__ia64_get_dbr(reg));
+}
+
+static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg)
+{
+ return ((u64)ia64_get_ibr(reg));
+}
+
+/**************************************************************************
+ VCPU performance monitor register access routines
+ **************************************************************************/
+static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+ /* NOTE: Writes to unimplemented PMC registers are discarded */
+ ia64_set_pmc(reg, val);
+}
+
+static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+ /* NOTE: Writes to unimplemented PMD registers are discarded */
+ ia64_set_pmd(reg, val);
+}
+
+static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg)
+{
+ /* NOTE: Reads from unimplemented PMC registers return zero */
+ return ((u64)ia64_get_pmc(reg));
+}
+
+static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg)
+{
+ /* NOTE: Reads from unimplemented PMD registers return zero */
+ return ((u64)ia64_get_pmd(reg));
+}
+
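+/*
+ * Convert a virtual region register value to the machine value: the
+ * guest rid is shifted up four bits and tagged with 0xe to keep it
+ * distinct from host rids, ps is clamped to the host PAGE_SHIFT, and
+ * the VHPT walker is enabled.
+ */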
+static inline unsigned long vrrtomrr(unsigned long val)
+{
+ union ia64_rr rr;
+ rr.val = val;
+ rr.rid = (rr.rid << 4) | 0xe;
+ if (rr.ps > PAGE_SHIFT)
+ rr.ps = PAGE_SHIFT;
+ rr.ve = 1;
+ return rr.val;
+}
+
+
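+/*
+ * Scan a 256-bit vector (eight 32-bit words) from the top down and
+ * return the index of the highest set bit, or NULL_VECTOR if empty.
+ */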
+static inline int highest_bits(int *dat)
+{
+ u32 bits, bitnum;
+ int i;
+
+ /* loop for all 256 bits */
+ for (i = 7; i >= 0 ; i--) {
+ bits = dat[i];
+ if (bits) {
+ bitnum = fls(bits);
+ return i * 32 + bitnum - 1;
+ }
+ }
+ return NULL_VECTOR;
+}
+
+/*
+ * Return true if the pending irq has higher priority than the
+ * in-service one.
+ */
+static inline int is_higher_irq(int pending, int inservice)
+{
+ return ((pending > inservice)
+ || ((pending != NULL_VECTOR)
+ && (inservice == NULL_VECTOR)));
+}
+
+static inline int is_higher_class(int pending, int mic)
+{
+ return ((pending >> 4) > mic);
+}
+
+/*
+ * Return 0-255 for the highest pending irq,
+ * or NULL_VECTOR when none is pending.
+ */
+static inline int highest_pending_irq(struct kvm_vcpu *vcpu)
+{
+ if (VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR))
+ return NMI_VECTOR;
+ if (VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR))
+ return ExtINT_VECTOR;
+
+ return highest_bits((int *)&VCPU(vcpu, irr[0]));
+}
+
+static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
+{
+ if (VMX(vcpu, insvc[0]) & (1UL<<NMI_VECTOR))
+ return NMI_VECTOR;
+ if (VMX(vcpu, insvc[0]) & (1UL<<ExtINT_VECTOR))
+ return ExtINT_VECTOR;
+
+ return highest_bits((int *)&(VMX(vcpu, insvc[0])));
+}
+
+extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+ struct ia64_fpreg *val);
+extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+ struct ia64_fpreg *val);
+extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg);
+extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat);
+extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu);
+extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val);
+extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
+extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
+extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
+ u64 itir, u64 va, int type);
+extern struct thash_data *vhpt_lookup(u64 va);
+extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
+extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
+extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
+extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
+extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+ u64 itir, u64 ifa, int type);
+extern void thash_purge_all(struct kvm_vcpu *v);
+extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
+ u64 va, int is_data);
+extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va,
+ u64 ps, int is_data);
+
+extern void vcpu_increment_iip(struct kvm_vcpu *v);
+extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu);
+extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
+extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
+extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr);
+extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr);
+extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr);
+extern void nested_dtlb(struct kvm_vcpu *vcpu);
+extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr);
+extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref);
+
+extern void update_vhpi(struct kvm_vcpu *vcpu, int vec);
+extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice);
+
+extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
+extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma);
+extern void vmm_transition(struct kvm_vcpu *vcpu);
+extern void vmm_trampoline(union context *from, union context *to);
+extern int vmm_entry(void);
+extern u64 vcpu_get_itc(struct kvm_vcpu *vcpu);
+
+extern void vmm_reset_entry(void);
+void kvm_init_vtlb(struct kvm_vcpu *v);
+void kvm_init_vhpt(struct kvm_vcpu *v);
+void thash_init(struct thash_cb *hcb, u64 sz);
+
+void panic_vm(struct kvm_vcpu *v);
+
+extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
+ u64 arg4, u64 arg5, u64 arg6, u64 arg7);
+#endif
+#endif /* __VCPU_H__ */
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
new file mode 100644
index 00000000000..2275bf4e681
--- /dev/null
+++ b/arch/ia64/kvm/vmm.c
@@ -0,0 +1,66 @@
+/*
+ * vmm.c: vmm module interface with kvm module
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+
+#include <linux/module.h>
+#include <asm/fpswa.h>
+
+#include "vcpu.h"
+
+MODULE_AUTHOR("Intel");
+MODULE_LICENSE("GPL");
+
+extern char kvm_ia64_ivt;
+extern fpswa_interface_t *vmm_fpswa_interface;
+
+struct kvm_vmm_info vmm_info = {
+ .module = THIS_MODULE,
+ .vmm_entry = vmm_entry,
+ .tramp_entry = vmm_trampoline,
+ .vmm_ivt = (unsigned long)&kvm_ia64_ivt,
+};
+
+static int __init kvm_vmm_init(void)
+{
+
+ vmm_fpswa_interface = fpswa_interface;
+
+ /* Register vmm data with the kvm side */
+ return kvm_init(&vmm_info, 1024, THIS_MODULE);
+}
+
+static void __exit kvm_vmm_exit(void)
+{
+ kvm_exit();
+ return ;
+}
+
+void vmm_spin_lock(spinlock_t *lock)
+{
+ _vmm_raw_spin_lock(lock);
+}
+
+void vmm_spin_unlock(spinlock_t *lock)
+{
+ _vmm_raw_spin_unlock(lock);
+}
+module_init(kvm_vmm_init)
+module_exit(kvm_vmm_exit)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
new file mode 100644
index 00000000000..3ee5f481c06
--- /dev/null
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -0,0 +1,1424 @@
+/*
+ * arch/ia64/kvm/vmm_ivt.S
+ *
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * David Mosberger <davidm@hpl.hp.com>
+ * Copyright (C) 2000, 2002-2003 Intel Co
+ * Asit Mallick <asit.k.mallick@intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Kenneth Chen <kenneth.w.chen@intel.com>
+ * Fenghua Yu <fenghua.yu@intel.com>
+ *
+ *
+ * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling
+ * for SMP
+ * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB
+ * handler now uses virtual PT.
+ *
+ * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
+ * Supporting Intel virtualization architecture
+ *
+ */
+
+/*
+ * This file defines the interruption vector table used by the CPU.
+ * It does not include one entry per possible cause of interruption.
+ *
+ * The first 20 entries of the table contain 64 bundles each while the
+ * remaining 48 entries contain only 16 bundles each.
+ *
+ * The 64 bundles are used to allow inlining the whole handler for
+ * critical interruptions like TLB misses.
+ *
+ * For each entry, the comment is as follows:
+ *
+ * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ * entry offset ----/     /         /                  /  /
+ * entry number ---------/         /                  /  /
+ * size of the entry -------------/                  /  /
+ * vector name -------------------------------------/  /
+ * interruptions triggering this vector ----------------/
+ *
+ * The table is 32KB in size and must be aligned on a 32KB boundary.
+ * (The CPU ignores the 15 lower bits of the address.)
+ *
+ * Table is based upon EAS2.6 (Oct 1999)
+ */
+
+
+#include <asm/asmmacro.h>
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+
+#include "asm-offsets.h"
+#include "vcpu.h"
+#include "kvm_minstate.h"
+#include "vti.h"
+
+#if 1
+# define PSR_DEFAULT_BITS psr.ac
+#else
+# define PSR_DEFAULT_BITS 0
+#endif
+
+
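+/*
+ * KVM_FAULT(n) parks an unhandled vector in a tight self-branch with
+ * the vector number in r19; KVM_REFLECT(n) tests ipsr.vm and reflects
+ * the fault to the guest when it came from guest mode, or panics when
+ * the fault was raised inside the VMM itself.
+ */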
+#define KVM_FAULT(n) \
+ kvm_fault_##n:; \
+ mov r19=n;; \
+ br.sptk.many kvm_fault_##n; \
+ ;; \
+
+
+#define KVM_REFLECT(n) \
+ mov r31=pr; \
+ mov r19=n; /* prepare to save predicates */ \
+ mov r29=cr.ipsr; \
+ ;; \
+ tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \
+(p7)br.sptk.many kvm_dispatch_reflection; \
+ br.sptk.many kvm_panic; \
+
+
+GLOBAL_ENTRY(kvm_panic)
+ br.sptk.many kvm_panic
+ ;;
+END(kvm_panic)
+
+
+
+
+
+ .section .text.ivt,"ax"
+
+ .align 32768 // align on 32KB boundary
+ .global kvm_ia64_ivt
+kvm_ia64_ivt:
+///////////////////////////////////////////////////////////////
+// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
+ENTRY(kvm_vhpt_miss)
+ KVM_FAULT(0)
+END(kvm_vhpt_miss)
+
+
+ .org kvm_ia64_ivt+0x400
+////////////////////////////////////////////////////////////////
+// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
+ENTRY(kvm_itlb_miss)
+ mov r31 = pr
+ mov r29=cr.ipsr;
+ ;;
+ tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
+ (p6) br.sptk kvm_alt_itlb_miss
+ mov r19 = 1
+ br.sptk kvm_itlb_miss_dispatch
+ KVM_FAULT(1);
+END(kvm_itlb_miss)
+
+ .org kvm_ia64_ivt+0x0800
+//////////////////////////////////////////////////////////////////
+// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
+ENTRY(kvm_dtlb_miss)
+ mov r31 = pr
+ mov r29=cr.ipsr;
+ ;;
+ tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
+(p6)br.sptk kvm_alt_dtlb_miss
+ br.sptk kvm_dtlb_miss_dispatch
+END(kvm_dtlb_miss)
+
+ .org kvm_ia64_ivt+0x0c00
+////////////////////////////////////////////////////////////////////
+// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
+ENTRY(kvm_alt_itlb_miss)
+ mov r16=cr.ifa // get address that caused the TLB miss
+ ;;
+ movl r17=PAGE_KERNEL
+ mov r24=cr.ipsr
+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+ ;;
+ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
+ ;;
+ or r19=r17,r19 // insert PTE control bits into r19
+ ;;
+ movl r20=IA64_GRANULE_SHIFT<<2
+ ;;
+ mov cr.itir=r20
+ ;;
+ itc.i r19 // insert the TLB entry
+ mov pr=r31,-1
+ rfi
+END(kvm_alt_itlb_miss)
+
+ .org kvm_ia64_ivt+0x1000
+/////////////////////////////////////////////////////////////////////
+// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
+ENTRY(kvm_alt_dtlb_miss)
+ mov r16=cr.ifa // get address that caused the TLB miss
+ ;;
+ movl r17=PAGE_KERNEL
+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+ mov r24=cr.ipsr
+ ;;
+ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
+ ;;
+ or r19=r19,r17 // insert PTE control bits into r19
+ ;;
+ movl r20=IA64_GRANULE_SHIFT<<2
+ ;;
+ mov cr.itir=r20
+ ;;
+ itc.d r19 // insert the TLB entry
+ mov pr=r31,-1
+ rfi
+END(kvm_alt_dtlb_miss)
+
+ .org kvm_ia64_ivt+0x1400
+//////////////////////////////////////////////////////////////////////
+// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
+ENTRY(kvm_nested_dtlb_miss)
+ KVM_FAULT(5)
+END(kvm_nested_dtlb_miss)
+
+ .org kvm_ia64_ivt+0x1800
+/////////////////////////////////////////////////////////////////////
+// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
+ENTRY(kvm_ikey_miss)
+ KVM_REFLECT(6)
+END(kvm_ikey_miss)
+
+ .org kvm_ia64_ivt+0x1c00
+/////////////////////////////////////////////////////////////////////
+// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ENTRY(kvm_dkey_miss)
+ KVM_REFLECT(7)
+END(kvm_dkey_miss)
+
+ .org kvm_ia64_ivt+0x2000
+////////////////////////////////////////////////////////////////////
+// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
+ENTRY(kvm_dirty_bit)
+ KVM_REFLECT(8)
+END(kvm_dirty_bit)
+
+ .org kvm_ia64_ivt+0x2400
+////////////////////////////////////////////////////////////////////
+// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
+ENTRY(kvm_iaccess_bit)
+ KVM_REFLECT(9)
+END(kvm_iaccess_bit)
+
+ .org kvm_ia64_ivt+0x2800
+///////////////////////////////////////////////////////////////////
+// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
+ENTRY(kvm_daccess_bit)
+ KVM_REFLECT(10)
+END(kvm_daccess_bit)
+
+ .org kvm_ia64_ivt+0x2c00
+/////////////////////////////////////////////////////////////////
+// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
+ENTRY(kvm_break_fault)
+ mov r31=pr
+ mov r19=11
+ mov r29=cr.ipsr
+ ;;
+ KVM_SAVE_MIN_WITH_COVER_R19
+ ;;
+ alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
+ mov out0=cr.ifa
+ mov out2=cr.isr // FIXME: pity to make this slow access twice
+ mov out3=cr.iim // FIXME: pity to make this slow access twice
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15)ssm psr.i // restore psr.i
+ addl r14=@gprel(ia64_leave_hypervisor),gp
+ ;;
+ KVM_SAVE_REST
+ mov rp=r14
+ ;;
+ adds out1=16,sp
+ br.call.sptk.many b6=kvm_ia64_handle_break
+ ;;
+END(kvm_break_fault)
+
+ .org kvm_ia64_ivt+0x3000
+/////////////////////////////////////////////////////////////////
+// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
+ENTRY(kvm_interrupt)
+ mov r31=pr // prepare to save predicates
+ mov r19=12
+ mov r29=cr.ipsr
+ ;;
+ tbit.z p6,p7=r29,IA64_PSR_VM_BIT
+ tbit.z p0,p15=r29,IA64_PSR_I_BIT
+ ;;
+(p7) br.sptk kvm_dispatch_interrupt
+ ;;
+ mov r27=ar.rsc /* M */
+ mov r20=r1 /* A */
+ mov r25=ar.unat /* M */
+ mov r26=ar.pfs /* I */
+ mov r28=cr.iip /* M */
+ cover /* B (or nothing) */
+ ;;
+ mov r1=sp
+ ;;
+ invala /* M */
+ mov r30=cr.ifs
+ ;;
+ addl r1=-VMM_PT_REGS_SIZE,r1
+ ;;
+ adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */
+ adds r16=PT(CR_IPSR),r1
+ ;;
+ lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
+ st8 [r16]=r29 /* save cr.ipsr */
+ ;;
+ lfetch.fault.excl.nt1 [r17]
+ mov r29=b0
+ ;;
+ adds r16=PT(R8),r1 /* initialize first base pointer */
+ adds r17=PT(R9),r1 /* initialize second base pointer */
+ mov r18=r0 /* make sure r18 isn't NaT */
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r8,16
+.mem.offset 8,0; st8.spill [r17]=r9,16
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r10,24
+.mem.offset 8,0; st8.spill [r17]=r11,24
+ ;;
+ st8 [r16]=r28,16 /* save cr.iip */
+ st8 [r17]=r30,16 /* save cr.ifs */
+ mov r8=ar.fpsr /* M */
+ mov r9=ar.csd
+ mov r10=ar.ssd
+ movl r11=FPSR_DEFAULT /* L-unit */
+ ;;
+ st8 [r16]=r25,16 /* save ar.unat */
+ st8 [r17]=r26,16 /* save ar.pfs */
+ shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */
+ ;;
+ st8 [r16]=r27,16 /* save ar.rsc */
+ adds r17=16,r17 /* skip over ar_rnat field */
+ ;;
+ st8 [r17]=r31,16 /* save predicates */
+ adds r16=16,r16 /* skip over ar_bspstore field */
+ ;;
+ st8 [r16]=r29,16 /* save b0 */
+ st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */
+.mem.offset 8,0; st8.spill [r17]=r12,16
+ adds r12=-16,r1
+ /* switch to kernel memory stack (with 16 bytes of scratch) */
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r13,16
+.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r15,16
+.mem.offset 8,0; st8.spill [r17]=r14,16
+ dep r14=-1,r0,60,4
+ ;;
+.mem.offset 0,0; st8.spill [r16]=r2,16
+.mem.offset 8,0; st8.spill [r17]=r3,16
+ adds r2=VMM_PT_REGS_R16_OFFSET,r1
+ adds r14 = VMM_VCPU_GP_OFFSET,r13
+ ;;
+ mov r8=ar.ccv
+ ld8 r14 = [r14]
+ ;;
+ mov r1=r14 /* establish kernel global pointer */
+ ;;
+ bsw.1
+ ;;
+ alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
+ mov out0=r13
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i
+ ;;
+ //(p15) ssm psr.i
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ srlz.i // ensure everybody knows psr.ic is back on
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r16,16
+.mem.offset 8,0; st8.spill [r3]=r17,16
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r18,16
+.mem.offset 8,0; st8.spill [r3]=r19,16
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r20,16
+.mem.offset 8,0; st8.spill [r3]=r21,16
+ mov r18=b6
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r22,16
+.mem.offset 8,0; st8.spill [r3]=r23,16
+ mov r19=b7
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r24,16
+.mem.offset 8,0; st8.spill [r3]=r25,16
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r26,16
+.mem.offset 8,0; st8.spill [r3]=r27,16
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r28,16
+.mem.offset 8,0; st8.spill [r3]=r29,16
+ ;;
+.mem.offset 0,0; st8.spill [r2]=r30,16
+.mem.offset 8,0; st8.spill [r3]=r31,32
+ ;;
+ mov ar.fpsr=r11 /* M-unit */
+ st8 [r2]=r8,8 /* ar.ccv */
+ adds r24=PT(B6)-PT(F7),r3
+ ;;
+ stf.spill [r2]=f6,32
+ stf.spill [r3]=f7,32
+ ;;
+ stf.spill [r2]=f8,32
+ stf.spill [r3]=f9,32
+ ;;
+ stf.spill [r2]=f10
+ stf.spill [r3]=f11
+ adds r25=PT(B7)-PT(F11),r3
+ ;;
+ st8 [r24]=r18,16 /* b6 */
+ st8 [r25]=r19,16 /* b7 */
+ ;;
+ st8 [r24]=r9 /* ar.csd */
+ st8 [r25]=r10 /* ar.ssd */
+ ;;
+ srlz.d // make sure we see the effect of cr.ivr
+ addl r14=@gprel(ia64_leave_nested),gp
+ ;;
+ mov rp=r14
+ br.call.sptk.many b6=kvm_ia64_handle_irq
+ ;;
+END(kvm_interrupt)
+
+ .global kvm_dispatch_vexirq
+ .org kvm_ia64_ivt+0x3400
+//////////////////////////////////////////////////////////////////////
+// 0x3400 Entry 13 (size 64 bundles) Reserved
+ENTRY(kvm_virtual_exirq)
+ mov r31=pr
+ mov r19=13
+ mov r30 =r0
+ ;;
+kvm_dispatch_vexirq:
+ cmp.eq p6,p0 = 1,r30
+ ;;
+(p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
+ ;;
+(p6)ld8 r1 = [r29]
+ ;;
+ KVM_SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,1,0
+ mov out0=r13
+
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15) ssm psr.i // restore psr.i
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ KVM_SAVE_REST
+ addl r14=@gprel(ia64_leave_hypervisor),gp
+ ;;
+ mov rp=r14
+ br.call.sptk.many b6=kvm_vexirq
+END(kvm_virtual_exirq)
+
+ .org kvm_ia64_ivt+0x3800
+/////////////////////////////////////////////////////////////////////
+// 0x3800 Entry 14 (size 64 bundles) Reserved
+ KVM_FAULT(14)
+ // this code segment is from 2.6.16.13
+
+
+ .org kvm_ia64_ivt+0x3c00
+///////////////////////////////////////////////////////////////////////
+// 0x3c00 Entry 15 (size 64 bundles) Reserved
+ KVM_FAULT(15)
+
+
+ .org kvm_ia64_ivt+0x4000
+///////////////////////////////////////////////////////////////////////
+// 0x4000 Entry 16 (size 64 bundles) Reserved
+ KVM_FAULT(16)
+
+ .org kvm_ia64_ivt+0x4400
+//////////////////////////////////////////////////////////////////////
+// 0x4400 Entry 17 (size 64 bundles) Reserved
+ KVM_FAULT(17)
+
+ .org kvm_ia64_ivt+0x4800
+//////////////////////////////////////////////////////////////////////
+// 0x4800 Entry 18 (size 64 bundles) Reserved
+ KVM_FAULT(18)
+
+ .org kvm_ia64_ivt+0x4c00
+//////////////////////////////////////////////////////////////////////
+// 0x4c00 Entry 19 (size 64 bundles) Reserved
+ KVM_FAULT(19)
+
+ .org kvm_ia64_ivt+0x5000
+//////////////////////////////////////////////////////////////////////
+// 0x5000 Entry 20 (size 16 bundles) Page Not Present
+ENTRY(kvm_page_not_present)
+ KVM_REFLECT(20)
+END(kvm_page_not_present)
+
+ .org kvm_ia64_ivt+0x5100
+///////////////////////////////////////////////////////////////////////
+// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
+ENTRY(kvm_key_permission)
+ KVM_REFLECT(21)
+END(kvm_key_permission)
+
+ .org kvm_ia64_ivt+0x5200
+//////////////////////////////////////////////////////////////////////
+// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
+ENTRY(kvm_iaccess_rights)
+ KVM_REFLECT(22)
+END(kvm_iaccess_rights)
+
+ .org kvm_ia64_ivt+0x5300
+//////////////////////////////////////////////////////////////////////
+// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
+ENTRY(kvm_daccess_rights)
+ KVM_REFLECT(23)
+END(kvm_daccess_rights)
+
+ .org kvm_ia64_ivt+0x5400
+/////////////////////////////////////////////////////////////////////
+// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
+ENTRY(kvm_general_exception)
+ KVM_REFLECT(24)
+ KVM_FAULT(24)
+END(kvm_general_exception)
+
+ .org kvm_ia64_ivt+0x5500
+//////////////////////////////////////////////////////////////////////
+// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
+ENTRY(kvm_disabled_fp_reg)
+ KVM_REFLECT(25)
+END(kvm_disabled_fp_reg)
+
+ .org kvm_ia64_ivt+0x5600
+////////////////////////////////////////////////////////////////////
+// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
+ENTRY(kvm_nat_consumption)
+ KVM_REFLECT(26)
+END(kvm_nat_consumption)
+
+ .org kvm_ia64_ivt+0x5700
+/////////////////////////////////////////////////////////////////////
+// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
+ENTRY(kvm_speculation_vector)
+ KVM_REFLECT(27)
+END(kvm_speculation_vector)
+
+ .org kvm_ia64_ivt+0x5800
+/////////////////////////////////////////////////////////////////////
+// 0x5800 Entry 28 (size 16 bundles) Reserved
+ KVM_FAULT(28)
+
+ .org kvm_ia64_ivt+0x5900
+///////////////////////////////////////////////////////////////////
+// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
+ENTRY(kvm_debug_vector)
+ KVM_FAULT(29)
+END(kvm_debug_vector)
+
+ .org kvm_ia64_ivt+0x5a00
+///////////////////////////////////////////////////////////////
+// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
+ENTRY(kvm_unaligned_access)
+ KVM_REFLECT(30)
+END(kvm_unaligned_access)
+
+ .org kvm_ia64_ivt+0x5b00
+//////////////////////////////////////////////////////////////////////
+// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
+ENTRY(kvm_unsupported_data_reference)
+ KVM_REFLECT(31)
+END(kvm_unsupported_data_reference)
+
+ .org kvm_ia64_ivt+0x5c00
+////////////////////////////////////////////////////////////////////
+// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
+ENTRY(kvm_floating_point_fault)
+ KVM_REFLECT(32)
+END(kvm_floating_point_fault)
+
+ .org kvm_ia64_ivt+0x5d00
+/////////////////////////////////////////////////////////////////////
+// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
+ENTRY(kvm_floating_point_trap)
+ KVM_REFLECT(33)
+END(kvm_floating_point_trap)
+
+ .org kvm_ia64_ivt+0x5e00
+//////////////////////////////////////////////////////////////////////
+// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
+ENTRY(kvm_lower_privilege_trap)
+ KVM_REFLECT(34)
+END(kvm_lower_privilege_trap)
+
+ .org kvm_ia64_ivt+0x5f00
+//////////////////////////////////////////////////////////////////////
+// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
+ENTRY(kvm_taken_branch_trap)
+ KVM_REFLECT(35)
+END(kvm_taken_branch_trap)
+
+ .org kvm_ia64_ivt+0x6000
+////////////////////////////////////////////////////////////////////
+// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
+ENTRY(kvm_single_step_trap)
+ KVM_REFLECT(36)
+END(kvm_single_step_trap)
+ .global kvm_virtualization_fault_back
+ .org kvm_ia64_ivt+0x6100
+/////////////////////////////////////////////////////////////////////
+// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
+ENTRY(kvm_virtualization_fault)
+ mov r31=pr
+ adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
+ ;;
+ st8 [r16] = r1
+ adds r17 = VMM_VCPU_GP_OFFSET, r21
+ ;;
+ ld8 r1 = [r17]
+ cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
+ cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
+ cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
+ cmp.eq p9,p0=EVENT_RSM,r24
+ cmp.eq p10,p0=EVENT_SSM,r24
+ cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
+ cmp.eq p12,p0=EVENT_THASH,r24
+ (p6) br.dptk.many kvm_asm_mov_from_ar
+ (p7) br.dptk.many kvm_asm_mov_from_rr
+ (p8) br.dptk.many kvm_asm_mov_to_rr
+ (p9) br.dptk.many kvm_asm_rsm
+ (p10) br.dptk.many kvm_asm_ssm
+ (p11) br.dptk.many kvm_asm_mov_to_psr
+ (p12) br.dptk.many kvm_asm_thash
+ ;;
+kvm_virtualization_fault_back:
+ adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
+ ;;
+ ld8 r1 = [r16]
+ ;;
+ mov r19=37
+ adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
+ adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
+ ;;
+ st8 [r16] = r24
+ st8 [r17] = r25
+ ;;
+ cmp.ne p6,p0=EVENT_RFI, r24
+ (p6) br.sptk kvm_dispatch_virtualization_fault
+ ;;
+ adds r18=VMM_VPD_BASE_OFFSET,r21
+ ;;
+ ld8 r18=[r18]
+ ;;
+ adds r18=VMM_VPD_VIFS_OFFSET,r18
+ ;;
+ ld8 r18=[r18]
+ ;;
+ tbit.z p6,p0=r18,63
+ (p6) br.sptk kvm_dispatch_virtualization_fault
+ ;;
+ // if vifs.v = 1, discard the current register frame
+ alloc r18=ar.pfs,0,0,0,0
+ br.sptk kvm_dispatch_virtualization_fault
+END(kvm_virtualization_fault)
+
+ .org kvm_ia64_ivt+0x6200
+//////////////////////////////////////////////////////////////
+// 0x6200 Entry 38 (size 16 bundles) Reserved
+ KVM_FAULT(38)
+
+ .org kvm_ia64_ivt+0x6300
+/////////////////////////////////////////////////////////////////
+// 0x6300 Entry 39 (size 16 bundles) Reserved
+ KVM_FAULT(39)
+
+ .org kvm_ia64_ivt+0x6400
+/////////////////////////////////////////////////////////////////
+// 0x6400 Entry 40 (size 16 bundles) Reserved
+ KVM_FAULT(40)
+
+ .org kvm_ia64_ivt+0x6500
+//////////////////////////////////////////////////////////////////
+// 0x6500 Entry 41 (size 16 bundles) Reserved
+ KVM_FAULT(41)
+
+ .org kvm_ia64_ivt+0x6600
+//////////////////////////////////////////////////////////////////
+// 0x6600 Entry 42 (size 16 bundles) Reserved
+ KVM_FAULT(42)
+
+ .org kvm_ia64_ivt+0x6700
+//////////////////////////////////////////////////////////////////
+// 0x6700 Entry 43 (size 16 bundles) Reserved
+ KVM_FAULT(43)
+
+ .org kvm_ia64_ivt+0x6800
+//////////////////////////////////////////////////////////////////
+// 0x6800 Entry 44 (size 16 bundles) Reserved
+ KVM_FAULT(44)
+
+ .org kvm_ia64_ivt+0x6900
+///////////////////////////////////////////////////////////////////
+// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
+//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
+ENTRY(kvm_ia32_exception)
+ KVM_FAULT(45)
+END(kvm_ia32_exception)
+
+ .org kvm_ia64_ivt+0x6a00
+////////////////////////////////////////////////////////////////////
+// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
+ENTRY(kvm_ia32_intercept)
+ KVM_FAULT(46)
+END(kvm_ia32_intercept)
+
+ .org kvm_ia64_ivt+0x6c00
+/////////////////////////////////////////////////////////////////////
+// 0x6c00 Entry 48 (size 16 bundles) Reserved
+ KVM_FAULT(48)
+
+ .org kvm_ia64_ivt+0x6d00
+//////////////////////////////////////////////////////////////////////
+// 0x6d00 Entry 49 (size 16 bundles) Reserved
+ KVM_FAULT(49)
+
+ .org kvm_ia64_ivt+0x6e00
+//////////////////////////////////////////////////////////////////////
+// 0x6e00 Entry 50 (size 16 bundles) Reserved
+ KVM_FAULT(50)
+
+ .org kvm_ia64_ivt+0x6f00
+/////////////////////////////////////////////////////////////////////
+// 0x6f00 Entry 51 (size 16 bundles) Reserved
+ KVM_FAULT(51)
+
+ .org kvm_ia64_ivt+0x7100
+////////////////////////////////////////////////////////////////////
+// 0x7100 Entry 53 (size 16 bundles) Reserved
+ KVM_FAULT(53)
+
+ .org kvm_ia64_ivt+0x7200
+/////////////////////////////////////////////////////////////////////
+// 0x7200 Entry 54 (size 16 bundles) Reserved
+ KVM_FAULT(54)
+
+ .org kvm_ia64_ivt+0x7300
+////////////////////////////////////////////////////////////////////
+// 0x7300 Entry 55 (size 16 bundles) Reserved
+ KVM_FAULT(55)
+
+ .org kvm_ia64_ivt+0x7400
+////////////////////////////////////////////////////////////////////
+// 0x7400 Entry 56 (size 16 bundles) Reserved
+ KVM_FAULT(56)
+
+ .org kvm_ia64_ivt+0x7500
+/////////////////////////////////////////////////////////////////////
+// 0x7500 Entry 57 (size 16 bundles) Reserved
+ KVM_FAULT(57)
+
+ .org kvm_ia64_ivt+0x7600
+/////////////////////////////////////////////////////////////////////
+// 0x7600 Entry 58 (size 16 bundles) Reserved
+ KVM_FAULT(58)
+
+ .org kvm_ia64_ivt+0x7700
+////////////////////////////////////////////////////////////////////
+// 0x7700 Entry 59 (size 16 bundles) Reserved
+ KVM_FAULT(59)
+
+ .org kvm_ia64_ivt+0x7800
+////////////////////////////////////////////////////////////////////
+// 0x7800 Entry 60 (size 16 bundles) Reserved
+ KVM_FAULT(60)
+
+ .org kvm_ia64_ivt+0x7900
+/////////////////////////////////////////////////////////////////////
+// 0x7900 Entry 61 (size 16 bundles) Reserved
+ KVM_FAULT(61)
+
+ .org kvm_ia64_ivt+0x7a00
+/////////////////////////////////////////////////////////////////////
+// 0x7a00 Entry 62 (size 16 bundles) Reserved
+ KVM_FAULT(62)
+
+ .org kvm_ia64_ivt+0x7b00
+/////////////////////////////////////////////////////////////////////
+// 0x7b00 Entry 63 (size 16 bundles) Reserved
+ KVM_FAULT(63)
+
+ .org kvm_ia64_ivt+0x7c00
+////////////////////////////////////////////////////////////////////
+// 0x7c00 Entry 64 (size 16 bundles) Reserved
+ KVM_FAULT(64)
+
+ .org kvm_ia64_ivt+0x7d00
+/////////////////////////////////////////////////////////////////////
+// 0x7d00 Entry 65 (size 16 bundles) Reserved
+ KVM_FAULT(65)
+
+ .org kvm_ia64_ivt+0x7e00
+/////////////////////////////////////////////////////////////////////
+// 0x7e00 Entry 66 (size 16 bundles) Reserved
+ KVM_FAULT(66)
+
+ .org kvm_ia64_ivt+0x7f00
+////////////////////////////////////////////////////////////////////
+// 0x7f00 Entry 67 (size 16 bundles) Reserved
+ KVM_FAULT(67)
+
+ .org kvm_ia64_ivt+0x8000
+// There is no particular reason for this code to be here, other than that
+// there happens to be space here that would go unused otherwise. If this
+// fault ever gets "unreserved", simply move the following code to a more
+// suitable spot...
+
+
+ENTRY(kvm_dtlb_miss_dispatch)
+ mov r19 = 2
+ KVM_SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,3,0
+ mov out0=cr.ifa
+ mov out1=r15
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15) ssm psr.i // restore psr.i
+ addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
+ ;;
+ KVM_SAVE_REST
+ KVM_SAVE_EXTRA
+ mov rp=r14
+ ;;
+ adds out2=16,r12
+ br.call.sptk.many b6=kvm_page_fault
+END(kvm_dtlb_miss_dispatch)
+
+ENTRY(kvm_itlb_miss_dispatch)
+
+ KVM_SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,3,0
+ mov out0=cr.ifa
+ mov out1=r15
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15) ssm psr.i // restore psr.i
+ addl r14=@gprel(ia64_leave_hypervisor),gp
+ ;;
+ KVM_SAVE_REST
+ mov rp=r14
+ ;;
+ adds out2=16,r12
+ br.call.sptk.many b6=kvm_page_fault
+END(kvm_itlb_miss_dispatch)
+
+ENTRY(kvm_dispatch_reflection)
+ /*
+ * Input:
+ * psr.ic: off
+ * r19: intr type (offset into ivt, see ia64_int.h)
+ * r31: contains saved predicates (pr)
+ */
+ KVM_SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,5,0
+ mov out0=cr.ifa
+ mov out1=cr.isr
+ mov out2=cr.iim
+ mov out3=r15
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15) ssm psr.i // restore psr.i
+ addl r14=@gprel(ia64_leave_hypervisor),gp
+ ;;
+ KVM_SAVE_REST
+ mov rp=r14
+ ;;
+ adds out4=16,r12
+ br.call.sptk.many b6=reflect_interruption
+END(kvm_dispatch_reflection)
+
+ENTRY(kvm_dispatch_virtualization_fault)
+ adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
+ adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
+ ;;
+ st8 [r16] = r24
+ st8 [r17] = r25
+ ;;
+ KVM_SAVE_MIN_WITH_COVER_R19
+ ;;
+ alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
+ mov out0=r13 //vcpu
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ //(p15) ssm psr.i // restore psr.i
+ addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
+ ;;
+ KVM_SAVE_REST
+ KVM_SAVE_EXTRA
+ mov rp=r14
+ ;;
+ adds out1=16,sp //regs
+ br.call.sptk.many b6=kvm_emulate
+END(kvm_dispatch_virtualization_fault)
+
+
+ENTRY(kvm_dispatch_interrupt)
+ KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
+ ;;
+ alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
+ //mov out0=cr.ivr // pass cr.ivr as first arg
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ ;;
+ ssm psr.ic
+ ;;
+ srlz.i
+ ;;
+ //(p15) ssm psr.i
+ addl r14=@gprel(ia64_leave_hypervisor),gp
+ ;;
+ KVM_SAVE_REST
+ mov rp=r14
+ ;;
+ mov out0=r13 // pass the vcpu pointer (r13) as the only argument
+ br.call.sptk.many b6=kvm_ia64_handle_irq
+END(kvm_dispatch_interrupt)
+
+
+
+
+GLOBAL_ENTRY(ia64_leave_nested)
+ rsm psr.i
+ ;;
+ adds r21=PT(PR)+16,r12
+ ;;
+ lfetch [r21],PT(CR_IPSR)-PT(PR)
+ adds r2=PT(B6)+16,r12
+ adds r3=PT(R16)+16,r12
+ ;;
+ lfetch [r21]
+ ld8 r28=[r2],8 // load b6
+ adds r29=PT(R24)+16,r12
+
+ ld8.fill r16=[r3]
+ adds r3=PT(AR_CSD)-PT(R16),r3
+ adds r30=PT(AR_CCV)+16,r12
+ ;;
+ ld8.fill r24=[r29]
+ ld8 r15=[r30] // load ar.ccv
+ ;;
+ ld8 r29=[r2],16 // load b7
+ ld8 r30=[r3],16 // load ar.csd
+ ;;
+ ld8 r31=[r2],16 // load ar.ssd
+ ld8.fill r8=[r3],16
+ ;;
+ ld8.fill r9=[r2],16
+ ld8.fill r10=[r3],PT(R17)-PT(R10)
+ ;;
+ ld8.fill r11=[r2],PT(R18)-PT(R11)
+ ld8.fill r17=[r3],16
+ ;;
+ ld8.fill r18=[r2],16
+ ld8.fill r19=[r3],16
+ ;;
+ ld8.fill r20=[r2],16
+ ld8.fill r21=[r3],16
+ mov ar.csd=r30
+ mov ar.ssd=r31
+ ;;
+ rsm psr.i | psr.ic
+ // initiate turning off of interrupt and interruption collection
+ invala // invalidate ALAT
+ ;;
+ srlz.i
+ ;;
+ ld8.fill r22=[r2],24
+ ld8.fill r23=[r3],24
+ mov b6=r28
+ ;;
+ ld8.fill r25=[r2],16
+ ld8.fill r26=[r3],16
+ mov b7=r29
+ ;;
+ ld8.fill r27=[r2],16
+ ld8.fill r28=[r3],16
+ ;;
+ ld8.fill r29=[r2],16
+ ld8.fill r30=[r3],24
+ ;;
+ ld8.fill r31=[r2],PT(F9)-PT(R31)
+ adds r3=PT(F10)-PT(F6),r3
+ ;;
+ ldf.fill f9=[r2],PT(F6)-PT(F9)
+ ldf.fill f10=[r3],PT(F8)-PT(F10)
+ ;;
+ ldf.fill f6=[r2],PT(F7)-PT(F6)
+ ;;
+ ldf.fill f7=[r2],PT(F11)-PT(F7)
+ ldf.fill f8=[r3],32
+ ;;
+ srlz.i // ensure interruption collection is off
+ mov ar.ccv=r15
+ ;;
+ bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
+ ;;
+ ldf.fill f11=[r2]
+// mov r18=r13
+// mov r21=r13
+ adds r16=PT(CR_IPSR)+16,r12
+ adds r17=PT(CR_IIP)+16,r12
+ ;;
+ ld8 r29=[r16],16 // load cr.ipsr
+ ld8 r28=[r17],16 // load cr.iip
+ ;;
+ ld8 r30=[r16],16 // load cr.ifs
+ ld8 r25=[r17],16 // load ar.unat
+ ;;
+ ld8 r26=[r16],16 // load ar.pfs
+ ld8 r27=[r17],16 // load ar.rsc
+ cmp.eq p9,p0=r0,r0
+ // set p9 to indicate that we should restore cr.ifs
+ ;;
+ ld8 r24=[r16],16 // load ar.rnat (may be garbage)
+ ld8 r23=[r17],16// load ar.bspstore (may be garbage)
+ ;;
+ ld8 r31=[r16],16 // load predicates
+ ld8 r22=[r17],16 // load b0
+ ;;
+ ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
+ ld8.fill r1=[r17],16 // load r1
+ ;;
+ ld8.fill r12=[r16],16
+ ld8.fill r13=[r17],16
+ ;;
+ ld8 r20=[r16],16 // ar.fpsr
+ ld8.fill r15=[r17],16
+ ;;
+ ld8.fill r14=[r16],16
+ ld8.fill r2=[r17]
+ ;;
+ ld8.fill r3=[r16]
+ ;;
+ mov r16=ar.bsp // get existing backing store pointer
+ ;;
+ mov b0=r22
+ mov ar.pfs=r26
+ mov cr.ifs=r30
+ mov cr.ipsr=r29
+ mov ar.fpsr=r20
+ mov cr.iip=r28
+ ;;
+ mov ar.rsc=r27
+ mov ar.unat=r25
+ mov pr=r31,-1
+ rfi
+END(ia64_leave_nested)
+
+
+
+GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
+ /*
+ * work.need_resched etc. mustn't get changed by this CPU before it
+ * returns to user- or fsys-mode, hence we disable interrupts early on:
+ */
+ adds r2 = PT(R4)+16,r12
+ adds r3 = PT(R5)+16,r12
+ adds r8 = PT(EML_UNAT)+16,r12
+ ;;
+ ld8 r8 = [r8]
+ ;;
+ mov ar.unat=r8
+ ;;
+ ld8.fill r4=[r2],16 //load r4
+ ld8.fill r5=[r3],16 //load r5
+ ;;
+ ld8.fill r6=[r2] //load r6
+ ld8.fill r7=[r3] //load r7
+ ;;
+END(ia64_leave_hypervisor_prepare)
+//fall through
+GLOBAL_ENTRY(ia64_leave_hypervisor)
+ rsm psr.i
+ ;;
+ br.call.sptk.many b0=leave_hypervisor_tail
+ ;;
+ adds r20=PT(PR)+16,r12
+ adds r8=PT(EML_UNAT)+16,r12
+ ;;
+ ld8 r8=[r8]
+ ;;
+ mov ar.unat=r8
+ ;;
+ lfetch [r20],PT(CR_IPSR)-PT(PR)
+ adds r2 = PT(B6)+16,r12
+ adds r3 = PT(B7)+16,r12
+ ;;
+ lfetch [r20]
+ ;;
+ ld8 r24=[r2],16 /* B6 */
+ ld8 r25=[r3],16 /* B7 */
+ ;;
+ ld8 r26=[r2],16 /* ar_csd */
+ ld8 r27=[r3],16 /* ar_ssd */
+ mov b6 = r24
+ ;;
+ ld8.fill r8=[r2],16
+ ld8.fill r9=[r3],16
+ mov b7 = r25
+ ;;
+ mov ar.csd = r26
+ mov ar.ssd = r27
+ ;;
+ ld8.fill r10=[r2],PT(R15)-PT(R10)
+ ld8.fill r11=[r3],PT(R14)-PT(R11)
+ ;;
+ ld8.fill r15=[r2],PT(R16)-PT(R15)
+ ld8.fill r14=[r3],PT(R17)-PT(R14)
+ ;;
+ ld8.fill r16=[r2],16
+ ld8.fill r17=[r3],16
+ ;;
+ ld8.fill r18=[r2],16
+ ld8.fill r19=[r3],16
+ ;;
+ ld8.fill r20=[r2],16
+ ld8.fill r21=[r3],16
+ ;;
+ ld8.fill r22=[r2],16
+ ld8.fill r23=[r3],16
+ ;;
+ ld8.fill r24=[r2],16
+ ld8.fill r25=[r3],16
+ ;;
+ ld8.fill r26=[r2],16
+ ld8.fill r27=[r3],16
+ ;;
+ ld8.fill r28=[r2],16
+ ld8.fill r29=[r3],16
+ ;;
+ ld8.fill r30=[r2],PT(F6)-PT(R30)
+ ld8.fill r31=[r3],PT(F7)-PT(R31)
+ ;;
+ rsm psr.i | psr.ic
+ // initiate turning off of interrupt and interruption collection
+ invala // invalidate ALAT
+ ;;
+ srlz.i // ensure interruption collection is off
+ ;;
+ bsw.0
+ ;;
+ adds r16 = PT(CR_IPSR)+16,r12
+ adds r17 = PT(CR_IIP)+16,r12
+ mov r21=r13 // get current
+ ;;
+ ld8 r31=[r16],16 // load cr.ipsr
+ ld8 r30=[r17],16 // load cr.iip
+ ;;
+ ld8 r29=[r16],16 // load cr.ifs
+ ld8 r28=[r17],16 // load ar.unat
+ ;;
+ ld8 r27=[r16],16 // load ar.pfs
+ ld8 r26=[r17],16 // load ar.rsc
+ ;;
+ ld8 r25=[r16],16 // load ar.rnat
+ ld8 r24=[r17],16 // load ar.bspstore
+ ;;
+ ld8 r23=[r16],16 // load predicates
+ ld8 r22=[r17],16 // load b0
+ ;;
+ ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
+ ld8.fill r1=[r17],16 //load r1
+ ;;
+ ld8.fill r12=[r16],16 //load r12
+ ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
+ ;;
+ ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
+ ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
+ ;;
+ ld8.fill r3=[r16] //load r3
+ ld8 r18=[r17] //load ar_ccv
+ ;;
+ mov ar.fpsr=r19
+ mov ar.ccv=r18
+ shr.u r18=r20,16
+ ;;
+kvm_rbs_switch:
+ mov r19=96 // physStackedSize + 8; see "sub r19=r19,r18" below
+
+kvm_dont_preserve_current_frame:
+/*
+ * To prevent leaking bits between the hypervisor and guest domain,
+ * we must clear the stacked registers in the "invalid" partition here.
+ * Not pretty, but at least it's fast (up to 5 registers/cycle on McKinley).
+ */
+# define pRecurse p6
+# define pReturn p7
+# define Nregs 14
+
+ alloc loc0=ar.pfs,2,Nregs-2,2,0
+ shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
+ sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize
+ ;;
+ mov ar.rsc=r20 // load ar.rsc to be used for "loadrs"
+ shladd in0=loc1,3,r19
+ mov in1=0
+ ;;
+ TEXT_ALIGN(32)
+kvm_rse_clear_invalid:
+ alloc loc0=ar.pfs,2,Nregs-2,2,0
+ cmp.lt pRecurse,p0=Nregs*8,in0
+ // if more than Nregs regs left to clear, (re)curse
+ add out0=-Nregs*8,in0
+ add out1=1,in1 // increment recursion count
+ mov loc1=0
+ mov loc2=0
+ ;;
+ mov loc3=0
+ mov loc4=0
+ mov loc5=0
+ mov loc6=0
+ mov loc7=0
+(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
+ ;;
+ mov loc8=0
+ mov loc9=0
+ cmp.ne pReturn,p0=r0,in1
+ // if recursion count != 0, we need to do a br.ret
+ mov loc10=0
+ mov loc11=0
+(pReturn) br.ret.dptk.many b0
+
+# undef pRecurse
+# undef pReturn
+
+// loadrs has already been shifted
+ alloc r16=ar.pfs,0,0,0,0 // drop current register frame
+ ;;
+ loadrs
+ ;;
+ mov ar.bspstore=r24
+ ;;
+ mov ar.unat=r28
+ mov ar.rnat=r25
+ mov ar.rsc=r26
+ ;;
+ mov cr.ipsr=r31
+ mov cr.iip=r30
+ mov cr.ifs=r29
+ mov ar.pfs=r27
+ adds r18=VMM_VPD_BASE_OFFSET,r21
+ ;;
+ ld8 r18=[r18] //vpd
+ adds r17=VMM_VCPU_ISR_OFFSET,r21
+ ;;
+ ld8 r17=[r17]
+ adds r19=VMM_VPD_VPSR_OFFSET,r18
+ ;;
+ ld8 r19=[r19] //vpsr
+ adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
+ ;;
+ ld8 r20=[r20]
+ ;;
+//vsa_sync_write_start
+ mov r25=r18
+ adds r16= VMM_VCPU_GP_OFFSET,r21
+ ;;
+ ld8 r16= [r16] // load gp; added into r24 below to form the return address
+ movl r24=@gprel(ia64_vmm_entry) // calculate return address
+ ;;
+ add r24=r24,r16
+ ;;
+ add r16=PAL_VPS_SYNC_WRITE,r20
+ ;;
+ mov b0=r16
+ br.cond.sptk b0 // call the service
+ ;;
+END(ia64_leave_hypervisor)
+// fall through
+GLOBAL_ENTRY(ia64_vmm_entry)
+/*
+ * must be at bank 0
+ * parameter:
+ * r17:cr.isr
+ * r18:vpd
+ * r19:vpsr
+ * r20:__vsa_base
+ * r22:b0
+ * r23:predicate
+ */
+ mov r24=r22
+ mov r25=r18
+ tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
+ ;;
+ (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
+ (p1) br.sptk.many ia64_vmm_entry_out
+ ;;
+ tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT //p1=cr.isr.ir
+ ;;
+ (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
+ (p2) add r29=PAL_VPS_RESUME_HANDLER,r20
+ (p2) ld8 r26=[r25]
+ ;;
+ia64_vmm_entry_out:
+ mov pr=r23,-2
+ mov b0=r29
+ ;;
+ br.cond.sptk b0 // call pal service
+END(ia64_vmm_entry)
+
+
+
+/*
+ * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
+ * u64 arg3, u64 arg4, u64 arg5,
+ * u64 arg6, u64 arg7);
+ *
+ * XXX: The currently defined services use at most 4 args. The
+ * rest are not consumed.
+ */
+GLOBAL_ENTRY(ia64_call_vsa)
+ .regstk 4,4,0,0
+
+rpsave = loc0
+pfssave = loc1
+psrsave = loc2
+entry = loc3
+hostret = r24
+
+ alloc pfssave=ar.pfs,4,4,0,0
+ mov rpsave=rp
+ adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
+ ;;
+ ld8 entry=[entry]
+1: mov hostret=ip
+ mov r25=in1 // copy arguments
+ mov r26=in2
+ mov r27=in3
+ mov psrsave=psr
+ ;;
+ tbit.nz p6,p0=psrsave,14 // IA64_PSR_I
+ tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC
+ ;;
+ add hostret=2f-1b,hostret // calculate return address
+ add entry=entry,in0
+ ;;
+ rsm psr.i | psr.ic
+ ;;
+ srlz.i
+ mov b6=entry
+ br.cond.sptk b6 // call the service
+2:
+ // Architectural sequence for enabling interrupts if necessary
+(p7) ssm psr.ic
+ ;;
+(p7) srlz.i
+ ;;
+//(p6) ssm psr.i
+ ;;
+ mov rp=rpsave
+ mov ar.pfs=pfssave
+ mov r8=r31
+ ;;
+ srlz.d
+ br.ret.sptk rp
+
+END(ia64_call_vsa)
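For reference, a hedged sketch of how the stub above might be invoked from C. The wrapper name is hypothetical, and whether PAL_VPS_SYNC_READ takes the VPD address as its first argument is an assumption for illustration; per the XXX note above, only the leading arguments are consumed, so the rest are passed as zero:

	/* Sketch (hypothetical wrapper): invoke the "sync read" PAL
	 * virtual service through ia64_call_vsa(). The first argument
	 * selects the service by its offset from __vsa_base.
	 */
	static inline u64 example_vps_sync_read(u64 vpd_paddr)
	{
		return ia64_call_vsa(PAL_VPS_SYNC_READ, vpd_paddr, 0, 0,
				     0, 0, 0, 0);
	}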
+
+#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)
+
+GLOBAL_ENTRY(vmm_reset_entry)
+ //set up ipsr, iip, vpd.vpsr, dcr
+ // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
+ // For DCR: all bits 0
+ adds r14=-VMM_PT_REGS_SIZE, r12
+ ;;
+ movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
+ movl r10=0x8000000000000000
+ adds r16=PT(CR_IIP), r14
+ adds r20=PT(R1), r14
+ ;;
+ rsm psr.ic | psr.i
+ ;;
+ srlz.i
+ ;;
+ bsw.0
+ ;;
+ mov r21 =r13
+ ;;
+ bsw.1
+ ;;
+ mov ar.rsc = 0
+ ;;
+ flushrs
+ ;;
+ mov ar.bspstore = 0
+ // clear BSPSTORE
+ ;;
+ mov cr.ipsr=r6
+ mov cr.ifs=r10
+ ld8 r4 = [r16] // Set init iip for first run.
+ ld8 r1 = [r20]
+ ;;
+ mov cr.iip=r4
+ ;;
+ adds r16=VMM_VPD_BASE_OFFSET,r13
+ adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
+ ;;
+ ld8 r18=[r16]
+ ld8 r20=[r20]
+ ;;
+ adds r19=VMM_VPD_VPSR_OFFSET,r18
+ ;;
+ ld8 r19=[r19]
+ mov r17=r0
+ mov r22=r0
+ mov r23=r0
+ br.cond.sptk ia64_vmm_entry
+ br.ret.sptk b0
+END(vmm_reset_entry)
diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h
new file mode 100644
index 00000000000..f6c5617e16a
--- /dev/null
+++ b/arch/ia64/kvm/vti.h
@@ -0,0 +1,290 @@
+/*
+ * vti.h: prototypes for the general VT-related interface
+ * Copyright (c) 2004, Intel Corporation.
+ *
+ * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
+ * Fred Yang (fred.yang@intel.com)
+ * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ * Zhang xiantao <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+#ifndef _KVM_VT_I_H
+#define _KVM_VT_I_H
+
+#ifndef __ASSEMBLY__
+#include <asm/page.h>
+
+#include <linux/kvm_host.h>
+
+/* define itr.i and itr.d in ia64_itr function */
+#define ITR 0x01
+#define DTR 0x02
+#define IaDTR 0x03
+
+#define IA64_TR_VMM 6 /*itr6, dtr6 : maps vmm code, vmbuffer*/
+#define IA64_TR_VM_DATA 7 /*dtr7 : maps current vm data*/
+
+#define RR6 (6UL<<61)
+#define RR7 (7UL<<61)
+
+
+/* config_options in pal_vp_init_env */
+#define VP_INITIALIZE 1UL
+#define VP_FR_PMC (1UL << 1)
+#define VP_OPCODE (1UL << 8)
+#define VP_CAUSE (1UL << 9)
+#define VP_FW_ACC (1UL << 63)
+
+/* init vp env with initializing vm_buffer */
+#define VP_INIT_ENV_INITALIZE (VP_INITIALIZE | VP_FR_PMC |\
+ VP_OPCODE | VP_CAUSE | VP_FW_ACC)
+/* init vp env without initializing vm_buffer */
+#define VP_INIT_ENV (VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC)
+
+#define PAL_VP_CREATE 265
+/* Stacked Virt. Initializes a new VPD for the operation of
+ * a new virtual processor in the virtual environment.
+ */
+#define PAL_VP_ENV_INFO 266
+/*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/
+#define PAL_VP_EXIT_ENV 267
+/*Stacked Virt. Allows a logical processor to exit a virtual environment.*/
+#define PAL_VP_INIT_ENV 268
+/*Stacked Virt. Allows a logical processor to enter a virtual environment.*/
+#define PAL_VP_REGISTER 269
+/*Stacked Virt. Register a different host IVT for the virtual processor.*/
+#define PAL_VP_RESUME 270
+/* Renamed from PAL_VP_RESUME */
+#define PAL_VP_RESTORE 270
+/*Stacked Virt. Resumes virtual processor operation on the logical processor.*/
+#define PAL_VP_SUSPEND 271
+/* Renamed from PAL_VP_SUSPEND */
+#define PAL_VP_SAVE 271
+/* Stacked Virt. Suspends operation for the specified virtual processor on
+ * the logical processor.
+ */
+#define PAL_VP_TERMINATE 272
+/* Stacked Virt. Terminates operation for the specified virtual processor.*/
+
+union vac {
+ unsigned long value;
+ struct {
+ int a_int:1;
+ int a_from_int_cr:1;
+ int a_to_int_cr:1;
+ int a_from_psr:1;
+ int a_from_cpuid:1;
+ int a_cover:1;
+ int a_bsw:1;
+ long reserved:57;
+ };
+};
+
+union vdc {
+ unsigned long value;
+ struct {
+ int d_vmsw:1;
+ int d_extint:1;
+ int d_ibr_dbr:1;
+ int d_pmc:1;
+ int d_to_pmd:1;
+ int d_itm:1;
+ long reserved:58;
+ };
+};
+
+struct vpd {
+ union vac vac;
+ union vdc vdc;
+ unsigned long virt_env_vaddr;
+ unsigned long reserved1[29];
+ unsigned long vhpi;
+ unsigned long reserved2[95];
+ unsigned long vgr[16];
+ unsigned long vbgr[16];
+ unsigned long vnat;
+ unsigned long vbnat;
+ unsigned long vcpuid[5];
+ unsigned long reserved3[11];
+ unsigned long vpsr;
+ unsigned long vpr;
+ unsigned long reserved4[76];
+ union {
+ unsigned long vcr[128];
+ struct {
+ unsigned long dcr;
+ unsigned long itm;
+ unsigned long iva;
+ unsigned long rsv1[5];
+ unsigned long pta;
+ unsigned long rsv2[7];
+ unsigned long ipsr;
+ unsigned long isr;
+ unsigned long rsv3;
+ unsigned long iip;
+ unsigned long ifa;
+ unsigned long itir;
+ unsigned long iipa;
+ unsigned long ifs;
+ unsigned long iim;
+ unsigned long iha;
+ unsigned long rsv4[38];
+ unsigned long lid;
+ unsigned long ivr;
+ unsigned long tpr;
+ unsigned long eoi;
+ unsigned long irr[4];
+ unsigned long itv;
+ unsigned long pmv;
+ unsigned long cmcv;
+ unsigned long rsv5[5];
+ unsigned long lrr0;
+ unsigned long lrr1;
+ unsigned long rsv6[46];
+ };
+ };
+ unsigned long reserved5[128];
+ unsigned long reserved6[3456];
+ unsigned long vmm_avail[128];
+ unsigned long reserved7[4096];
+};
+
+#define PAL_PROC_VM_BIT (1UL << 40)
+#define PAL_PROC_VMSW_BIT (1UL << 54)
+
+static inline s64 ia64_pal_vp_env_info(u64 *buffer_size,
+ u64 *vp_env_info)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0);
+ *buffer_size = iprv.v0;
+ *vp_env_info = iprv.v1;
+ return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_exit_env(u64 iva)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0);
+ return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr,
+ u64 vbase_addr, u64 *vsa_base)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr,
+ vbase_addr);
+ *vsa_base = iprv.v0;
+
+ return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0);
+
+ return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
+
+ return iprv.status;
+}
+
+#endif
+
+/*VPD field offset*/
+#define VPD_VAC_START_OFFSET 0
+#define VPD_VDC_START_OFFSET 8
+#define VPD_VHPI_START_OFFSET 256
+#define VPD_VGR_START_OFFSET 1024
+#define VPD_VBGR_START_OFFSET 1152
+#define VPD_VNAT_START_OFFSET 1280
+#define VPD_VBNAT_START_OFFSET 1288
+#define VPD_VCPUID_START_OFFSET 1296
+#define VPD_VPSR_START_OFFSET 1424
+#define VPD_VPR_START_OFFSET 1432
+#define VPD_VRSE_CFLE_START_OFFSET 1440
+#define VPD_VCR_START_OFFSET 2048
+#define VPD_VTPR_START_OFFSET 2576
+#define VPD_VRR_START_OFFSET 3072
+#define VPD_VMM_VAIL_START_OFFSET 31744
+
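Since struct vpd earlier in this header and the offsets above must stay in lockstep, a compile-time cross-check along the following lines would catch layout drift. This is a sketch, not part of the patch; it would have to live in the !__ASSEMBLY__ section, and BUILD_BUG_ON is the standard kernel macro:

	#include <linux/kernel.h>	/* BUILD_BUG_ON */
	#include <linux/stddef.h>	/* offsetof */

	/* Sketch: tie the VPD_*_START_OFFSET constants to struct vpd. */
	static inline void vpd_layout_checks(void)
	{
		BUILD_BUG_ON(offsetof(struct vpd, vac)  != VPD_VAC_START_OFFSET);
		BUILD_BUG_ON(offsetof(struct vpd, vdc)  != VPD_VDC_START_OFFSET);
		BUILD_BUG_ON(offsetof(struct vpd, vhpi) != VPD_VHPI_START_OFFSET);
		BUILD_BUG_ON(offsetof(struct vpd, vgr)  != VPD_VGR_START_OFFSET);
		BUILD_BUG_ON(offsetof(struct vpd, vpsr) != VPD_VPSR_START_OFFSET);
		BUILD_BUG_ON(offsetof(struct vpd, vpr)  != VPD_VPR_START_OFFSET);
		BUILD_BUG_ON(offsetof(struct vpd, vcr)  != VPD_VCR_START_OFFSET);
	}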
+/*Virtualization faults*/
+
+#define EVENT_MOV_TO_AR 1
+#define EVENT_MOV_TO_AR_IMM 2
+#define EVENT_MOV_FROM_AR 3
+#define EVENT_MOV_TO_CR 4
+#define EVENT_MOV_FROM_CR 5
+#define EVENT_MOV_TO_PSR 6
+#define EVENT_MOV_FROM_PSR 7
+#define EVENT_ITC_D 8
+#define EVENT_ITC_I 9
+#define EVENT_MOV_TO_RR 10
+#define EVENT_MOV_TO_DBR 11
+#define EVENT_MOV_TO_IBR 12
+#define EVENT_MOV_TO_PKR 13
+#define EVENT_MOV_TO_PMC 14
+#define EVENT_MOV_TO_PMD 15
+#define EVENT_ITR_D 16
+#define EVENT_ITR_I 17
+#define EVENT_MOV_FROM_RR 18
+#define EVENT_MOV_FROM_DBR 19
+#define EVENT_MOV_FROM_IBR 20
+#define EVENT_MOV_FROM_PKR 21
+#define EVENT_MOV_FROM_PMC 22
+#define EVENT_MOV_FROM_CPUID 23
+#define EVENT_SSM 24
+#define EVENT_RSM 25
+#define EVENT_PTC_L 26
+#define EVENT_PTC_G 27
+#define EVENT_PTC_GA 28
+#define EVENT_PTR_D 29
+#define EVENT_PTR_I 30
+#define EVENT_THASH 31
+#define EVENT_TTAG 32
+#define EVENT_TPA 33
+#define EVENT_TAK 34
+#define EVENT_PTC_E 35
+#define EVENT_COVER 36
+#define EVENT_RFI 37
+#define EVENT_BSW_0 38
+#define EVENT_BSW_1 39
+#define EVENT_VMSW 40
+
+/* PAL virtual services offsets */
+#define PAL_VPS_RESUME_NORMAL 0x0000
+#define PAL_VPS_RESUME_HANDLER 0x0400
+#define PAL_VPS_SYNC_READ 0x0800
+#define PAL_VPS_SYNC_WRITE 0x0c00
+#define PAL_VPS_SET_PENDING_INTERRUPT 0x1000
+#define PAL_VPS_THASH 0x1400
+#define PAL_VPS_TTAG 0x1800
+#define PAL_VPS_RESTORE 0x1c00
+#define PAL_VPS_SAVE 0x2000
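Each of these offsets is added to the per-processor __vsa_base (returned by PAL_VP_INIT_ENV) to form the entry point of the corresponding service, which is exactly what the assembly in vmm_ivt.S does before branching (e.g. "add r16=PAL_VPS_SYNC_WRITE,r20"). A one-line C rendering of that dispatch, with a hypothetical vsa_base argument:

	/* Sketch: a PAL virtual service entry point is __vsa_base plus
	 * the service offset; 'vsa_base' stands in for the value that
	 * PAL_VP_INIT_ENV returned for this logical processor.
	 */
	static inline u64 vps_entry_point(u64 vsa_base, u64 vps_offset)
	{
		return vsa_base + vps_offset;	/* e.g. PAL_VPS_RESUME_NORMAL */
	}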
+
+#endif /* _KVM_VT_I_H */
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
new file mode 100644
index 00000000000..def4576d22b
--- /dev/null
+++ b/arch/ia64/kvm/vtlb.c
@@ -0,0 +1,636 @@
+/*
+ * vtlb.c: guest virtual tlb handling module.
+ * Copyright (c) 2004, Intel Corporation.
+ * Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
+ * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include "vcpu.h"
+
+#include <linux/rwsem.h>
+
+#include <asm/tlb.h>
+
+/*
+ * Check to see if the address rid:va is translated by the TLB
+ */
+
+static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
+{
+ return ((trp->p) && (trp->rid == rid)
+ && ((va-trp->vadr) < PSIZE(trp->ps)));
+}
+
+/*
+ * Only for GUEST TR format.
+ */
+static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
+{
+ u64 sa1, ea1;
+
+ if (!trp->p || trp->rid != rid)
+ return 0;
+
+ sa1 = trp->vadr;
+ ea1 = sa1 + PSIZE(trp->ps) - 1;
+ eva -= 1;
+ if ((sva > ea1) || (sa1 > eva))
+ return 0;
+ else
+ return 1;
+
+}
+
+void machine_tlb_purge(u64 va, u64 ps)
+{
+ ia64_ptcl(va, ps << 2);
+}
+
+void local_flush_tlb_all(void)
+{
+ int i, j;
+ unsigned long flags, count0, count1;
+ unsigned long stride0, stride1, addr;
+
+ addr = current_vcpu->arch.ptce_base;
+ count0 = current_vcpu->arch.ptce_count[0];
+ count1 = current_vcpu->arch.ptce_count[1];
+ stride0 = current_vcpu->arch.ptce_stride[0];
+ stride1 = current_vcpu->arch.ptce_stride[1];
+
+ local_irq_save(flags);
+ for (i = 0; i < count0; ++i) {
+ for (j = 0; j < count1; ++j) {
+ ia64_ptce(addr);
+ addr += stride1;
+ }
+ addr += stride0;
+ }
+ local_irq_restore(flags);
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+}
+
+int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
+{
+ union ia64_rr vrr;
+ union ia64_pta vpta;
+ struct ia64_psr vpsr;
+
+ vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+ vrr.val = vcpu_get_rr(vcpu, vadr);
+ vpta.val = vcpu_get_pta(vcpu);
+
+ if (vrr.ve & vpta.ve) {
+ switch (ref) {
+ case DATA_REF:
+ case NA_REF:
+ return vpsr.dt;
+ case INST_REF:
+ return vpsr.dt && vpsr.it && vpsr.ic;
+ case RSE_REF:
+ return vpsr.dt && vpsr.rt;
+
+ }
+ }
+ return 0;
+}
+
+struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
+{
+ u64 index, pfn, rid, pfn_bits;
+
+ pfn_bits = vpta.size - 5 - 8;
+ pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
+ rid = _REGION_ID(vrr);
+ index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1));
+ *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
+
+ return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
+ (index << 5));
+}
+
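To make the hash above concrete, here is a standalone sketch with the region macros expanded by hand. The inputs are assumptions for illustration: vpta.size = 16 (a 64KB long-format VHPT, so pfn_bits = 16 - 5 - 8 = 3), a 4KB region page size, and rid 0x1234:

	#include <stdio.h>

	/* Standalone sketch of the index/tag math in vsa_thash(). */
	int main(void)
	{
		unsigned long va = 0x5000;		/* region offset of the address */
		unsigned long pfn = va >> 12;		/* = 5 (4KB region page size) */
		unsigned long rid = 0x1234;
		unsigned long pfn_bits = 16 - 5 - 8;	/* = 3 */
		unsigned long index = ((rid & 0xff) << pfn_bits) |
				      (pfn & ((1UL << pfn_bits) - 1));
		unsigned long tag = ((rid >> 8) & 0xffff) |
				    ((pfn >> pfn_bits) << 16);

		/* index = (0x34 << 3) | 5 = 0x1a5; each bucket is 32 bytes,
		 * hence the (index << 5) in vsa_thash(). */
		printf("index = 0x%lx, tag = 0x%lx\n", index, tag);
		return 0;
	}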
+struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
+{
+
+ struct thash_data *trp;
+ int i;
+ u64 rid;
+
+ rid = vcpu_get_rr(vcpu, va);
+ rid = rid & RR_RID_MASK;
+ if (type == D_TLB) {
+ if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
+ for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
+ i < NDTRS; i++, trp++) {
+ if (__is_tr_translated(trp, rid, va))
+ return trp;
+ }
+ }
+ } else {
+ if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
+ for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
+ i < NITRS; i++, trp++) {
+ if (__is_tr_translated(trp, rid, va))
+ return trp;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
+{
+ union ia64_rr rr;
+ struct thash_data *head;
+ unsigned long ps, gpaddr;
+
+ ps = itir_ps(itir);
+
+ gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
+ (ifa & ((1UL << ps) - 1));
+
+ rr.val = ia64_get_rr(ifa);
+ head = (struct thash_data *)ia64_thash(ifa);
+ head->etag = INVALID_TI_TAG;
+ ia64_mf();
+ head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
+ head->itir = rr.ps << 2;
+ head->etag = ia64_ttag(ifa);
+ head->gpaddr = gpaddr;
+}
+
+void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
+{
+ u64 i, dirty_pages = 1;
+ u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
+ spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+ void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE)
+ + KVM_MEM_DIRTY_LOG_OFS;
+ dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
+
+ vmm_spin_lock(lock);
+ for (i = 0; i < dirty_pages; i++) {
+ /* avoid RMW */
+ if (!test_bit(base_gfn + i, dirty_bitmap))
+ set_bit(base_gfn + i , dirty_bitmap);
+ }
+ vmm_spin_unlock(lock);
+}
+
+void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
+{
+ u64 phy_pte, psr;
+ union ia64_rr mrr;
+
+ mrr.val = ia64_get_rr(va);
+ phy_pte = translate_phy_pte(&pte, itir, va);
+
+ if (itir_ps(itir) >= mrr.ps) {
+ vhpt_insert(phy_pte, itir, va, pte);
+ } else {
+ phy_pte &= ~PAGE_FLAGS_RV_MASK;
+ psr = ia64_clear_ic();
+ ia64_itc(type, va, phy_pte, itir_ps(itir));
+ ia64_set_psr(psr);
+ }
+
+ if (!(pte&VTLB_PTE_IO))
+ mark_pages_dirty(v, pte, itir_ps(itir));
+}
+
+/*
+ * vhpt lookup
+ */
+struct thash_data *vhpt_lookup(u64 va)
+{
+ struct thash_data *head;
+ u64 tag;
+
+ head = (struct thash_data *)ia64_thash(va);
+ tag = ia64_ttag(va);
+ if (head->etag == tag)
+ return head;
+ return NULL;
+}
+
+u64 guest_vhpt_lookup(u64 iha, u64 *pte)
+{
+ u64 ret;
+ struct thash_data *data;
+
+ data = __vtr_lookup(current_vcpu, iha, D_TLB);
+ if (data != NULL)
+ thash_vhpt_insert(current_vcpu, data->page_flags,
+ data->itir, iha, D_TLB);
+
+ asm volatile ("rsm psr.ic|psr.i;;"
+ "srlz.d;;"
+ "ld8.s r9=[%1];;"
+ "tnat.nz p6,p7=r9;;"
+ "(p6) mov %0=1;"
+ "(p6) mov r9=r0;"
+ "(p7) extr.u r9=r9,0,53;;"
+ "(p7) mov %0=r0;"
+ "(p7) st8 [%2]=r9;;"
+ "ssm psr.ic;;"
+ "srlz.d;;"
+ /* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
+ : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+
+ return ret;
+}
+
+/*
+ * purge software guest tlb
+ */
+
+static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+ struct thash_data *cur;
+ u64 start, curadr, size, psbits, tag, rr_ps, num;
+ union ia64_rr vrr;
+ struct thash_cb *hcb = &v->arch.vtlb;
+
+ vrr.val = vcpu_get_rr(v, va);
+ psbits = VMX(v, psbits[(va >> 61)]);
+ start = va & ~((1UL << ps) - 1);
+ while (psbits) {
+ curadr = start;
+ rr_ps = __ffs(psbits);
+ psbits &= ~(1UL << rr_ps);
+ num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
+ size = PSIZE(rr_ps);
+ vrr.ps = rr_ps;
+ while (num) {
+ cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
+ if (cur->etag == tag && cur->ps == rr_ps)
+ cur->etag = INVALID_TI_TAG;
+ curadr += size;
+ num--;
+ }
+ }
+}
+
+
+/*
+ * purge VHPT and machine TLB
+ */
+static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+ struct thash_data *cur;
+ u64 start, size, tag, num;
+ union ia64_rr rr;
+
+ start = va & ~((1UL << ps) - 1);
+ rr.val = ia64_get_rr(va);
+ size = PSIZE(rr.ps);
+ num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
+ while (num) {
+ cur = (struct thash_data *)ia64_thash(start);
+ tag = ia64_ttag(start);
+ if (cur->etag == tag)
+ cur->etag = INVALID_TI_TAG;
+ start += size;
+ num--;
+ }
+ machine_tlb_purge(va, ps);
+}
+
+/*
+ * Insert an entry into hash TLB or VHPT.
+ * NOTES:
+ * 1: When inserting a VHPT entry into the thash, "va" must be an
+ * address covered by the inserted machine VHPT entry.
+ * 2: The entry format is always the TLB format.
+ * 3: The caller needs to make sure the new entry will not overlap
+ * with any existing entry.
+ */
+void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
+{
+ struct thash_data *head;
+ union ia64_rr vrr;
+ u64 tag;
+ struct thash_cb *hcb = &v->arch.vtlb;
+
+ vrr.val = vcpu_get_rr(v, va);
+ vrr.ps = itir_ps(itir);
+ VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
+ head = vsa_thash(hcb->pta, va, vrr.val, &tag);
+ head->page_flags = pte;
+ head->itir = itir;
+ head->etag = tag;
+}
+
+int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
+{
+ struct thash_data *trp;
+ int i;
+ u64 end, rid;
+
+ rid = vcpu_get_rr(vcpu, va);
+ rid = rid & RR_RID_MASK;
+ end = va + PSIZE(ps);
+ if (type == D_TLB) {
+ if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
+ for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
+ i < NDTRS; i++, trp++) {
+ if (__is_tr_overlap(trp, rid, va, end))
+ return i;
+ }
+ }
+ } else {
+ if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
+ for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
+ i < NITRS; i++, trp++) {
+ if (__is_tr_overlap(trp, rid, va, end))
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
+/*
+ * Purge entries in VTLB and VHPT
+ */
+void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+ if (vcpu_quick_region_check(v->arch.tc_regions, va))
+ vtlb_purge(v, va, ps);
+ vhpt_purge(v, va, ps);
+}
+
+void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+ u64 old_va = va;
+ va = REGION_OFFSET(va);
+ if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
+ vtlb_purge(v, va, ps);
+ vhpt_purge(v, va, ps);
+}
+
+u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
+{
+ u64 ps, ps_mask, paddr, maddr;
+ union pte_flags phy_pte;
+
+ ps = itir_ps(itir);
+ ps_mask = ~((1UL << ps) - 1);
+ phy_pte.val = *pte;
+ paddr = *pte;
+ paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
+ maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
+ if (maddr & GPFN_IO_MASK) {
+ *pte |= VTLB_PTE_IO;
+ return -1;
+ }
+ maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
+ (paddr & ~PAGE_MASK);
+ phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
+ return phy_pte.val;
+}
+
+/*
+ * Purge overlapping TCs and then insert the new entry to emulate itc ops.
+ * Notes: Only TC entries can be purged and inserted this way.
+ * A return value of 1 indicates the access is MMIO.
+ */
+int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+ u64 ifa, int type)
+{
+ u64 ps;
+ u64 phy_pte;
+ union ia64_rr vrr, mrr;
+ int ret = 0;
+
+ ps = itir_ps(itir);
+ vrr.val = vcpu_get_rr(v, ifa);
+ mrr.val = ia64_get_rr(ifa);
+
+ phy_pte = translate_phy_pte(&pte, itir, ifa);
+
+ /* Ensure WB attribute if pte is related to a normal mem page,
+ * which is required by vga acceleration since qemu maps shared
+ * vram buffer with WB.
+ */
+ if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+ pte &= ~_PAGE_MA_MASK;
+ phy_pte &= ~_PAGE_MA_MASK;
+ }
+
+ if (pte & VTLB_PTE_IO)
+ ret = 1;
+
+ vtlb_purge(v, ifa, ps);
+ vhpt_purge(v, ifa, ps);
+
+ if (ps == mrr.ps) {
+ if (!(pte&VTLB_PTE_IO)) {
+ vhpt_insert(phy_pte, itir, ifa, pte);
+ } else {
+ vtlb_insert(v, pte, itir, ifa);
+ vcpu_quick_region_set(VMX(v, tc_regions), ifa);
+ }
+ } else if (ps > mrr.ps) {
+ vtlb_insert(v, pte, itir, ifa);
+ vcpu_quick_region_set(VMX(v, tc_regions), ifa);
+ if (!(pte&VTLB_PTE_IO))
+ vhpt_insert(phy_pte, itir, ifa, pte);
+ } else {
+ u64 psr;
+ phy_pte &= ~PAGE_FLAGS_RV_MASK;
+ psr = ia64_clear_ic();
+ ia64_itc(type, ifa, phy_pte, ps);
+ ia64_set_psr(psr);
+ }
+ if (!(pte&VTLB_PTE_IO))
+ mark_pages_dirty(v, pte, ps);
+
+ return ret;
+}
+
+/*
+ * Purge all TCs or VHPT entries including those in Hash table.
+ *
+ */
+
+void thash_purge_all(struct kvm_vcpu *v)
+{
+ int i;
+ struct thash_data *head;
+ struct thash_cb *vtlb, *vhpt;
+ vtlb = &v->arch.vtlb;
+ vhpt = &v->arch.vhpt;
+
+ for (i = 0; i < 8; i++)
+ VMX(v, psbits[i]) = 0;
+
+ head = vtlb->hash;
+ for (i = 0; i < vtlb->num; i++) {
+ head->page_flags = 0;
+ head->etag = INVALID_TI_TAG;
+ head->itir = 0;
+ head->next = 0;
+ head++;
+ }
+
+ head = vhpt->hash;
+ for (i = 0; i < vhpt->num; i++) {
+ head->page_flags = 0;
+ head->etag = INVALID_TI_TAG;
+ head->itir = 0;
+ head->next = 0;
+ head++;
+ }
+
+ local_flush_tlb_all();
+}
+
+
+/*
+ * Lookup the hash table and its collision chain to find an entry
+ * covering this address rid:va.
+ *
+ * INPUT:
+ * in: TLB format for both VHPT & TLB.
+ */
+
+struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
+{
+ struct thash_data *cch;
+ u64 psbits, ps, tag;
+ union ia64_rr vrr;
+
+ struct thash_cb *hcb = &v->arch.vtlb;
+
+ cch = __vtr_lookup(v, va, is_data);
+ if (cch)
+ return cch;
+
+ if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
+ return NULL;
+
+ psbits = VMX(v, psbits[(va >> 61)]);
+ vrr.val = vcpu_get_rr(v, va);
+ while (psbits) {
+ ps = __ffs(psbits);
+ psbits &= ~(1UL << ps);
+ vrr.ps = ps;
+ cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
+ if (cch->etag == tag && cch->ps == ps)
+ return cch;
+ }
+
+ return NULL;
+}
+
+
+/*
+ * Initialize internal control data before service.
+ */
+void thash_init(struct thash_cb *hcb, u64 sz)
+{
+ int i;
+ struct thash_data *head;
+
+ hcb->pta.val = (unsigned long)hcb->hash;
+ hcb->pta.vf = 1;
+ hcb->pta.ve = 1;
+ hcb->pta.size = sz;
+ head = hcb->hash;
+ for (i = 0; i < hcb->num; i++) {
+ head->page_flags = 0;
+ head->itir = 0;
+ head->etag = INVALID_TI_TAG;
+ head->next = 0;
+ head++;
+ }
+}
+
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+ u64 *base = (u64 *) KVM_P2M_BASE;
+ return *(base + gpfn);
+}
+
+u64 kvm_gpa_to_mpa(u64 gpa)
+{
+ u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
+ return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
+}
+
+
+/*
+ * Fetch guest bundle code.
+ * INPUT:
+ * gip: guest ip
+ * pbundle: used to return fetched bundle.
+ */
+int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
+{
+ u64 gpip = 0; /* guest physical IP*/
+ u64 *vpa;
+ struct thash_data *tlb;
+ u64 maddr;
+
+ if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
+ /* I-side physical mode */
+ gpip = gip;
+ } else {
+ tlb = vtlb_lookup(vcpu, gip, I_TLB);
+ if (tlb)
+ gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
+ (gip & (PSIZE(tlb->ps) - 1));
+ }
+ if (gpip) {
+ maddr = kvm_gpa_to_mpa(gpip);
+ } else {
+ tlb = vhpt_lookup(gip);
+ if (tlb == NULL) {
+ ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
+ return IA64_FAULT;
+ }
+ maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
+ | (gip & (PSIZE(tlb->ps) - 1));
+ }
+ vpa = (u64 *)__kvm_va(maddr);
+
+ pbundle->i64[0] = *vpa++;
+ pbundle->i64[1] = *vpa;
+
+ return IA64_NO_FAULT;
+}
+
+
+void kvm_init_vhpt(struct kvm_vcpu *v)
+{
+ v->arch.vhpt.num = VHPT_NUM_ENTRIES;
+ thash_init(&v->arch.vhpt, VHPT_SHIFT);
+ ia64_set_pta(v->arch.vhpt.pta.val);
+ /*Enable VHPT here?*/
+}
+
+void kvm_init_vtlb(struct kvm_vcpu *v)
+{
+ v->arch.vtlb.num = VTLB_NUM_ENTRIES;
+ thash_init(&v->arch.vtlb, VTLB_SHIFT);
+}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 20f45a8b87e..4e40c122bf2 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -803,3 +803,4 @@ config PPC_CLOCK
config PPC_LIB_RHEAP
bool
+source "arch/powerpc/kvm/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index a86d8d85321..807a2dce626 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -151,6 +151,9 @@ config BOOTX_TEXT
config PPC_EARLY_DEBUG
bool "Early debugging (dangerous)"
+ # PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water
+ # mark, which doesn't work with current 440 KVM.
+ depends on !KVM
help
Say Y to enable some early debugging facilities that may be available
for your processor/board combination. Those facilities are hacks
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index e2ec4a91cce..9dcdc036cdf 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -145,6 +145,7 @@ core-y += arch/powerpc/kernel/ \
arch/powerpc/platforms/
core-$(CONFIG_MATH_EMULATION) += arch/powerpc/math-emu/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
+core-$(CONFIG_KVM) += arch/powerpc/kvm/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index adf1d09d726..62134845af0 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -23,6 +23,9 @@
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/hrtimer.h>
+#ifdef CONFIG_KVM
+#include <linux/kvm_host.h>
+#endif
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
@@ -324,5 +327,30 @@ int main(void)
DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
+#ifdef CONFIG_KVM
+ DEFINE(TLBE_BYTES, sizeof(struct tlbe));
+
+ DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
+ DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
+ DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
+ DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
+ DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
+ DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+ DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+ DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+ DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
+ DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
+ DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
+ DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
+ DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
+ DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
+
+ DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+ DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+ DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+#endif
+
return 0;
}
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
new file mode 100644
index 00000000000..f5d7a5eab96
--- /dev/null
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -0,0 +1,224 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <asm/mmu-44x.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
+#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
+
+static unsigned int kvmppc_tlb_44x_pos;
+
+static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
+{
+ /* Mask off reserved bits. */
+ attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;
+
+ if (!usermode) {
+ /* Guest is in supervisor mode, so we need to translate guest
+ * supervisor permissions into user permissions. */
+ attrib &= ~PPC44x_TLB_USER_PERM_MASK;
+ attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
+ }
+
+ /* Make sure host can always access this memory. */
+ attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
+
+ return attrib;
+}
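+
+/* Worked example (assuming the usual mmu-44x.h encoding, in which the
+ * UX/UW/UR bits are the SX/SW/SR bits shifted left by 3): a guest
+ * supervisor mapping with SR|SX = 0x05 first has any user bits cleared,
+ * gains UR|UX = 0x28 from the shift so guest code can run under a host
+ * user-mode TLB entry, and finally has SR|SW|SX forced on so the host
+ * kernel itself can always reach the page. */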
+
+/* Search the guest TLB for a matching entry. */
+int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
+ unsigned int as)
+{
+ int i;
+
+ /* XXX Replace loop with fancy data structures. */
+ for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+ struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
+ unsigned int tid;
+
+ if (eaddr < get_tlb_eaddr(tlbe))
+ continue;
+
+ if (eaddr > get_tlb_end(tlbe))
+ continue;
+
+ tid = get_tlb_tid(tlbe);
+ if (tid && (tid != pid))
+ continue;
+
+ if (!get_tlb_v(tlbe))
+ continue;
+
+ if (get_tlb_ts(tlbe) != as)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+{
+ unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+ unsigned int index;
+
+ index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
+ if (index == -1)
+ return NULL;
+ return &vcpu->arch.guest_tlb[index];
+}
+
+struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+{
+ unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+ unsigned int index;
+
+ index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
+ if (index == -1)
+ return NULL;
+ return &vcpu->arch.guest_tlb[index];
+}
+
+static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
+{
+ return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
+}
+
+/* Must be called with mmap_sem locked for writing. */
+static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
+ unsigned int index)
+{
+ struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
+ struct page *page = vcpu->arch.shadow_pages[index];
+
+ kunmap(vcpu->arch.shadow_pages[index]);
+
+ if (get_tlb_v(stlbe)) {
+ if (kvmppc_44x_tlbe_is_writable(stlbe))
+ kvm_release_page_dirty(page);
+ else
+ kvm_release_page_clean(page);
+ }
+}
+
+/* Caller must ensure that the specified guest TLB entry is safe to insert into
+ * the shadow TLB. */
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
+ u32 flags)
+{
+ struct page *new_page;
+ struct tlbe *stlbe;
+ hpa_t hpaddr;
+ unsigned int victim;
+
+ /* Future optimization: don't overwrite the TLB entry containing the
+ * current PC (or stack?). */
+ victim = kvmppc_tlb_44x_pos++;
+ if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
+ kvmppc_tlb_44x_pos = 0;
+ stlbe = &vcpu->arch.shadow_tlb[victim];
+
+ /* Get reference to new page. */
+ down_write(&current->mm->mmap_sem);
+ new_page = gfn_to_page(vcpu->kvm, gfn);
+ if (is_error_page(new_page)) {
+ printk(KERN_ERR "Couldn't get guest page!\n");
+ kvm_release_page_clean(new_page);
+ up_write(&current->mm->mmap_sem);
+ return;
+ }
+ hpaddr = page_to_phys(new_page);
+
+ /* Drop reference to old page. */
+ kvmppc_44x_shadow_release(vcpu, victim);
+ up_write(&current->mm->mmap_sem);
+
+ vcpu->arch.shadow_pages[victim] = new_page;
+
+ /* XXX Make sure (va, size) doesn't overlap any other
+ * entries. 440x6 user manual says the result would be
+ * "undefined." */
+
+ /* XXX what about AS? */
+
+ stlbe->tid = asid & 0xff;
+
+ /* Force TS=1 for all guest mappings. */
+ /* For now we hardcode 4KB mappings, but it will be important to
+ * use host large pages in the future. */
+ stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
+ | PPC44x_TLB_4K;
+
+ stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
+ stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
+ vcpu->arch.msr & MSR_PR);
+}
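+
+/* The word1 line above packs a 36-bit host physical address into one
+ * 32-bit word: the 1KB-aligned low part goes in the high bits and the
+ * top four bits of the 36-bit address (the 440 ERPN field) land in the
+ * low nibble. Worked example: hpaddr = 0x123456000 gives
+ * (0x23456000 & 0xfffffc00) | ((0x123456000ULL >> 32) & 0xf)
+ *	 = 0x23456000 | 0x1 = 0x23456001. */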
+
+void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
+{
+ unsigned int pid = asid & 0xff;
+ int i;
+
+ /* XXX Replace loop with fancy data structures. */
+ down_write(&current->mm->mmap_sem);
+ for (i = 0; i <= tlb_44x_hwater; i++) {
+ struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+ unsigned int tid;
+
+ if (!get_tlb_v(stlbe))
+ continue;
+
+ if (eaddr < get_tlb_eaddr(stlbe))
+ continue;
+
+ if (eaddr > get_tlb_end(stlbe))
+ continue;
+
+ tid = get_tlb_tid(stlbe);
+ if (tid && (tid != pid))
+ continue;
+
+ kvmppc_44x_shadow_release(vcpu, i);
+ stlbe->word0 = 0;
+ }
+ up_write(&current->mm->mmap_sem);
+}
+
+/* Invalidate all mappings, so that when they fault back in they will get the
+ * proper permission bits. */
+void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+{
+ int i;
+
+ /* XXX Replace loop with fancy data structures. */
+ down_write(&current->mm->mmap_sem);
+ for (i = 0; i <= tlb_44x_hwater; i++) {
+ kvmppc_44x_shadow_release(vcpu, i);
+ vcpu->arch.shadow_tlb[i].word0 = 0;
+ }
+ up_write(&current->mm->mmap_sem);
+}
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
new file mode 100644
index 00000000000..2ccd46b6f6b
--- /dev/null
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -0,0 +1,91 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __KVM_POWERPC_TLB_H__
+#define __KVM_POWERPC_TLB_H__
+
+#include <linux/kvm_host.h>
+#include <asm/mmu-44x.h>
+
+extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
+ unsigned int pid, unsigned int as);
+extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
+
+/* TLB helper functions */
+static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
+{
+ return (tlbe->word0 >> 4) & 0xf;
+}
+
+static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
+{
+ return tlbe->word0 & 0xfffffc00;
+}
+
+static inline gva_t get_tlb_bytes(const struct tlbe *tlbe)
+{
+ unsigned int pgsize = get_tlb_size(tlbe);
+ return 1 << 10 << (pgsize << 1);
+}
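+
+/* The SIZE field encodes the page size as 1KB * 4^SIZE, which is what
+ * the double shift computes: SIZE = 1 gives 1 << 10 << 2 = 4KB, and
+ * SIZE = 7 gives 1 << 10 << 14 = 16MB, the two sizes this patch
+ * actually installs via PPC44x_TLB_4K and PPC44x_TLB_16M. */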
+
+static inline gva_t get_tlb_end(const struct tlbe *tlbe)
+{
+ return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
+}
+
+static inline u64 get_tlb_raddr(const struct tlbe *tlbe)
+{
+ u64 word1 = tlbe->word1;
+ return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
+}
+
+static inline unsigned int get_tlb_tid(const struct tlbe *tlbe)
+{
+ return tlbe->tid & 0xff;
+}
+
+static inline unsigned int get_tlb_ts(const struct tlbe *tlbe)
+{
+ return (tlbe->word0 >> 8) & 0x1;
+}
+
+static inline unsigned int get_tlb_v(const struct tlbe *tlbe)
+{
+ return (tlbe->word0 >> 9) & 0x1;
+}
+
+static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.mmucr & 0xff;
+}
+
+static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.mmucr >> 16) & 0x1;
+}
+
+static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr)
+{
+ unsigned int pgmask = get_tlb_bytes(tlbe) - 1;
+
+ return get_tlb_raddr(tlbe) | (eaddr & pgmask);
+}
+
+#endif /* __KVM_POWERPC_TLB_H__ */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
new file mode 100644
index 00000000000..6b076010213
--- /dev/null
+++ b/arch/powerpc/kvm/Kconfig
@@ -0,0 +1,42 @@
+#
+# KVM configuration
+#
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ ---help---
+ Say Y here to get to see options for using your Linux host to run
+ other operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ bool "Kernel-based Virtual Machine (KVM) support"
+ depends on 44x && EXPERIMENTAL
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ # We can only run on Book E hosts so far
+ select KVM_BOOKE_HOST
+ ---help---
+ Support hosting virtualized guest machines. You will also
+ need to select one or more of the processor modules below.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ If unsure, say N.
+
+config KVM_BOOKE_HOST
+ bool "KVM host support for Book E PowerPC processors"
+ depends on KVM && 44x
+ ---help---
+ Provides host support for KVM on Book E PowerPC processors. Currently
+ this works on 440 processors only.
+
+source "drivers/virtio/Kconfig"
+
+endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
new file mode 100644
index 00000000000..d0d358d367e
--- /dev/null
+++ b/arch/powerpc/kvm/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+
+kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
+obj-$(CONFIG_KVM) += kvm.o
+
+AFLAGS_booke_interrupts.o := -I$(obj)
+
+kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o
+obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
new file mode 100644
index 00000000000..6d9884a6884
--- /dev/null
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -0,0 +1,615 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/cputable.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { "exits", VCPU_STAT(sum_exits) },
+ { "mmio", VCPU_STAT(mmio_exits) },
+ { "dcr", VCPU_STAT(dcr_exits) },
+ { "sig", VCPU_STAT(signal_exits) },
+ { "light", VCPU_STAT(light_exits) },
+ { "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
+ { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
+ { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
+ { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
+ { "sysc", VCPU_STAT(syscall_exits) },
+ { "isi", VCPU_STAT(isi_exits) },
+ { "dsi", VCPU_STAT(dsi_exits) },
+ { "inst_emu", VCPU_STAT(emulated_inst_exits) },
+ { "dec", VCPU_STAT(dec_exits) },
+ { "ext_intr", VCPU_STAT(ext_intr_exits) },
+ { NULL }
+};
+
+static const u32 interrupt_msr_mask[16] = {
+ [BOOKE_INTERRUPT_CRITICAL] = MSR_ME,
+ [BOOKE_INTERRUPT_MACHINE_CHECK] = 0,
+ [BOOKE_INTERRUPT_DATA_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_INST_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_EXTERNAL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_ALIGNMENT] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_PROGRAM] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_FP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_SYSCALL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_AP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_DECREMENTER] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_FIT] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_WATCHDOG] = MSR_ME,
+ [BOOKE_INTERRUPT_DTLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_ITLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_DEBUG] = MSR_ME,
+};
+
+const unsigned char exception_priority[] = {
+ [BOOKE_INTERRUPT_DATA_STORAGE] = 0,
+ [BOOKE_INTERRUPT_INST_STORAGE] = 1,
+ [BOOKE_INTERRUPT_ALIGNMENT] = 2,
+ [BOOKE_INTERRUPT_PROGRAM] = 3,
+ [BOOKE_INTERRUPT_FP_UNAVAIL] = 4,
+ [BOOKE_INTERRUPT_SYSCALL] = 5,
+ [BOOKE_INTERRUPT_AP_UNAVAIL] = 6,
+ [BOOKE_INTERRUPT_DTLB_MISS] = 7,
+ [BOOKE_INTERRUPT_ITLB_MISS] = 8,
+ [BOOKE_INTERRUPT_MACHINE_CHECK] = 9,
+ [BOOKE_INTERRUPT_DEBUG] = 10,
+ [BOOKE_INTERRUPT_CRITICAL] = 11,
+ [BOOKE_INTERRUPT_WATCHDOG] = 12,
+ [BOOKE_INTERRUPT_EXTERNAL] = 13,
+ [BOOKE_INTERRUPT_FIT] = 14,
+ [BOOKE_INTERRUPT_DECREMENTER] = 15,
+};
+
+const unsigned char priority_exception[] = {
+ BOOKE_INTERRUPT_DATA_STORAGE,
+ BOOKE_INTERRUPT_INST_STORAGE,
+ BOOKE_INTERRUPT_ALIGNMENT,
+ BOOKE_INTERRUPT_PROGRAM,
+ BOOKE_INTERRUPT_FP_UNAVAIL,
+ BOOKE_INTERRUPT_SYSCALL,
+ BOOKE_INTERRUPT_AP_UNAVAIL,
+ BOOKE_INTERRUPT_DTLB_MISS,
+ BOOKE_INTERRUPT_ITLB_MISS,
+ BOOKE_INTERRUPT_MACHINE_CHECK,
+ BOOKE_INTERRUPT_DEBUG,
+ BOOKE_INTERRUPT_CRITICAL,
+ BOOKE_INTERRUPT_WATCHDOG,
+ BOOKE_INTERRUPT_EXTERNAL,
+ BOOKE_INTERRUPT_FIT,
+ BOOKE_INTERRUPT_DECREMENTER,
+};
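+
+/* These two tables are inverses of each other: for every interrupt
+ * number i, priority_exception[exception_priority[i]] == i. A
+ * hypothetical check of that invariant (not part of this patch):
+ *
+ *	for (i = 0; i <= BOOKE_MAX_INTERRUPT; i++)
+ *		BUG_ON(priority_exception[exception_priority[i]] != i);
+ */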
+
+
+void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
+{
+ struct tlbe *tlbe;
+ int i;
+
+ printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
+ printk("| %2s | %3s | %8s | %8s | %8s |\n",
+ "nr", "tid", "word0", "word1", "word2");
+
+ for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+ tlbe = &vcpu->arch.guest_tlb[i];
+ if (tlbe->word0 & PPC44x_TLB_VALID)
+ printk(" G%2d | %02X | %08X | %08X | %08X |\n",
+ i, tlbe->tid, tlbe->word0, tlbe->word1,
+ tlbe->word2);
+ }
+
+ for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+ tlbe = &vcpu->arch.shadow_tlb[i];
+ if (tlbe->word0 & PPC44x_TLB_VALID)
+ printk(" S%2d | %02X | %08X | %08X | %08X |\n",
+ i, tlbe->tid, tlbe->word0, tlbe->word1,
+ tlbe->word2);
+ }
+}
+
+/* TODO: use vcpu_printf() */
+void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ printk("pc: %08x msr: %08x\n", vcpu->arch.pc, vcpu->arch.msr);
+ printk("lr: %08x ctr: %08x\n", vcpu->arch.lr, vcpu->arch.ctr);
+ printk("srr0: %08x srr1: %08x\n", vcpu->arch.srr0, vcpu->arch.srr1);
+
+ printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+ for (i = 0; i < 32; i += 4) {
+ printk("gpr%02d: %08x %08x %08x %08x\n", i,
+ vcpu->arch.gpr[i],
+ vcpu->arch.gpr[i+1],
+ vcpu->arch.gpr[i+2],
+ vcpu->arch.gpr[i+3]);
+ }
+}
+
+/* Check if we are ready to deliver the interrupt */
+static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
+{
+ int r;
+
+ switch (interrupt) {
+ case BOOKE_INTERRUPT_CRITICAL:
+ r = vcpu->arch.msr & MSR_CE;
+ break;
+ case BOOKE_INTERRUPT_MACHINE_CHECK:
+ r = vcpu->arch.msr & MSR_ME;
+ break;
+ case BOOKE_INTERRUPT_EXTERNAL:
+ r = vcpu->arch.msr & MSR_EE;
+ break;
+ case BOOKE_INTERRUPT_DECREMENTER:
+ r = vcpu->arch.msr & MSR_EE;
+ break;
+ case BOOKE_INTERRUPT_FIT:
+ r = vcpu->arch.msr & MSR_EE;
+ break;
+ case BOOKE_INTERRUPT_WATCHDOG:
+ r = vcpu->arch.msr & MSR_CE;
+ break;
+ case BOOKE_INTERRUPT_DEBUG:
+ r = vcpu->arch.msr & MSR_DE;
+ break;
+ default:
+ r = 1;
+ }
+
+ return r;
+}
+
+static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
+{
+ switch (interrupt) {
+ case BOOKE_INTERRUPT_DECREMENTER:
+ vcpu->arch.tsr |= TSR_DIS;
+ break;
+ }
+
+ vcpu->arch.srr0 = vcpu->arch.pc;
+ vcpu->arch.srr1 = vcpu->arch.msr;
+ vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[interrupt];
+ kvmppc_set_msr(vcpu, vcpu->arch.msr & interrupt_msr_mask[interrupt]);
+}
+
+/* Check pending exceptions and deliver one, if possible. */
+void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
+{
+ unsigned long *pending = &vcpu->arch.pending_exceptions;
+ unsigned int exception;
+ unsigned int priority;
+
+ priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending));
+ while (priority <= BOOKE_MAX_INTERRUPT) {
+ exception = priority_exception[priority];
+ if (kvmppc_can_deliver_interrupt(vcpu, exception)) {
+ kvmppc_clear_exception(vcpu, exception);
+ kvmppc_deliver_interrupt(vcpu, exception);
+ break;
+ }
+
+ priority = find_next_bit(pending,
+ BITS_PER_BYTE * sizeof(*pending),
+ priority + 1);
+ }
+}
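+
+/* Note that pending_exceptions is indexed by *priority*, not by
+ * interrupt number: find_first_bit() returns a priority, which
+ * priority_exception[] maps back to an interrupt. A queueing sketch
+ * consistent with this loop (the actual kvmppc_queue_exception() is
+ * defined elsewhere in the patch):
+ *
+ *	set_bit(exception_priority[exception],
+ *		&vcpu->arch.pending_exceptions);
+ */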
+
+static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+ int r;
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ /* We must reload nonvolatiles because "update" load/store
+ * instructions modify register state. */
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_HOST_NV;
+ break;
+ case EMULATE_FAIL:
+ /* XXX Deliver Program interrupt to guest. */
+ printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
+ vcpu->arch.last_inst);
+ r = RESUME_HOST;
+ break;
+ default:
+ BUG();
+ }
+
+ return r;
+}
+
+/**
+ * kvmppc_handle_exit
+ *
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
+{
+ enum emulation_result er;
+ int r = RESUME_HOST;
+
+ local_irq_enable();
+
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+
+ switch (exit_nr) {
+ case BOOKE_INTERRUPT_MACHINE_CHECK:
+ printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
+ kvmppc_dump_vcpu(vcpu);
+ r = RESUME_HOST;
+ break;
+
+ case BOOKE_INTERRUPT_EXTERNAL:
+ case BOOKE_INTERRUPT_DECREMENTER:
+ /* Since we switched IVPR back to the host's value, the host
+ * handled this interrupt the moment we enabled interrupts.
+ * Now we just offer it a chance to reschedule the guest. */
+
+ /* XXX At this point the TLB still holds our shadow TLB, so if
+ * we do reschedule the host will fault over it. Perhaps we
+ * should politely restore the host's entries to minimize
+ * misses before ceding control. */
+ if (need_resched())
+ cond_resched();
+ if (exit_nr == BOOKE_INTERRUPT_DECREMENTER)
+ vcpu->stat.dec_exits++;
+ else
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_PROGRAM:
+ if (vcpu->arch.msr & MSR_PR) {
+ /* Program traps generated by user-level software must be handled
+ * by the guest kernel. */
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* Future optimization: only reload non-volatiles if
+ * they were actually modified by emulation. */
+ vcpu->stat.emulated_inst_exits++;
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_DO_DCR:
+ run->exit_reason = KVM_EXIT_DCR;
+ r = RESUME_HOST;
+ break;
+ case EMULATE_FAIL:
+ /* XXX Deliver Program interrupt to guest. */
+ printk(KERN_CRIT "%s: emulation at %x failed (%08x)\n",
+ __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+ /* For debugging, encode the failing instruction and
+ * report it to userspace. */
+ run->hw.hardware_exit_reason = ~0ULL << 32;
+ run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+ r = RESUME_HOST;
+ break;
+ default:
+ BUG();
+ }
+ break;
+
+ case BOOKE_INTERRUPT_DATA_STORAGE:
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.dsi_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_INST_STORAGE:
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.isi_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_SYSCALL:
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.syscall_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_DTLB_MISS: {
+ struct tlbe *gtlbe;
+ unsigned long eaddr = vcpu->arch.fault_dear;
+ gfn_t gfn;
+
+ /* Check the guest TLB. */
+ gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
+ if (!gtlbe) {
+ /* The guest didn't have a mapping for it. */
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ vcpu->stat.dtlb_real_miss_exits++;
+ r = RESUME_GUEST;
+ break;
+ }
+
+ vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
+ gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
+
+ if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+ /* The guest TLB had a mapping, but the shadow TLB
+ * didn't, and it is RAM. This could be because:
+ * a) the entry is mapping the host kernel, or
+ * b) the guest used a large mapping which we're faking
+ * Either way, we need to satisfy the fault without
+ * invoking the guest. */
+ kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
+ gtlbe->word2);
+ vcpu->stat.dtlb_virt_miss_exits++;
+ r = RESUME_GUEST;
+ } else {
+ /* Guest has mapped and accessed a page which is not
+ * actually RAM. */
+ r = kvmppc_emulate_mmio(run, vcpu);
+ }
+
+ break;
+ }
+
+ case BOOKE_INTERRUPT_ITLB_MISS: {
+ struct tlbe *gtlbe;
+ unsigned long eaddr = vcpu->arch.pc;
+ gfn_t gfn;
+
+ r = RESUME_GUEST;
+
+ /* Check the guest TLB. */
+ gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
+ if (!gtlbe) {
+ /* The guest didn't have a mapping for it. */
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.itlb_real_miss_exits++;
+ break;
+ }
+
+ vcpu->stat.itlb_virt_miss_exits++;
+
+ gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT;
+
+ if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+ /* The guest TLB had a mapping, but the shadow TLB
+ * didn't. This could be because:
+ * a) the entry is mapping the host kernel, or
+ * b) the guest used a large mapping which we're faking
+ * Either way, we need to satisfy the fault without
+ * invoking the guest. */
+ kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
+ gtlbe->word2);
+ } else {
+ /* Guest mapped and leaped at non-RAM! */
+ kvmppc_queue_exception(vcpu,
+ BOOKE_INTERRUPT_MACHINE_CHECK);
+ }
+
+ break;
+ }
+
+ default:
+ printk(KERN_EMERG "exit_nr %d\n", exit_nr);
+ BUG();
+ }
+
+ local_irq_disable();
+
+ kvmppc_check_and_deliver_interrupts(vcpu);
+
+ /* Do some exit accounting. */
+ vcpu->stat.sum_exits++;
+ if (!(r & RESUME_HOST)) {
+ /* To avoid clobbering exit_reason, only check for signals if
+ * we aren't already exiting to userspace for some other
+ * reason. */
+ if (signal_pending(current)) {
+ run->exit_reason = KVM_EXIT_INTR;
+ r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
+
+ vcpu->stat.signal_exits++;
+ } else {
+ vcpu->stat.light_exits++;
+ }
+ } else {
+ switch (run->exit_reason) {
+ case KVM_EXIT_MMIO:
+ vcpu->stat.mmio_exits++;
+ break;
+ case KVM_EXIT_DCR:
+ vcpu->stat.dcr_exits++;
+ break;
+ case KVM_EXIT_INTR:
+ vcpu->stat.signal_exits++;
+ break;
+ }
+ }
+
+ return r;
+}
+
+/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct tlbe *tlbe = &vcpu->arch.guest_tlb[0];
+
+ tlbe->tid = 0;
+ tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
+ tlbe->word1 = 0;
+ tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
+
+ tlbe++;
+ tlbe->tid = 0;
+ tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
+ tlbe->word1 = 0xef600000;
+ tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
+ | PPC44x_TLB_I | PPC44x_TLB_G;
+
+ vcpu->arch.pc = 0;
+ vcpu->arch.msr = 0;
+ vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
+
+ /* Eye-catching number so we know if the guest takes an interrupt
+ * before it's programmed its own IVPR. */
+ vcpu->arch.ivpr = 0x55550000;
+
+ /* Since the guest can directly access the timebase, it must know the
+ * real timebase frequency. Accordingly, it must see the state of
+ * CCR1[TCS]. */
+ vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ regs->pc = vcpu->arch.pc;
+ regs->cr = vcpu->arch.cr;
+ regs->ctr = vcpu->arch.ctr;
+ regs->lr = vcpu->arch.lr;
+ regs->xer = vcpu->arch.xer;
+ regs->msr = vcpu->arch.msr;
+ regs->srr0 = vcpu->arch.srr0;
+ regs->srr1 = vcpu->arch.srr1;
+ regs->pid = vcpu->arch.pid;
+ regs->sprg0 = vcpu->arch.sprg0;
+ regs->sprg1 = vcpu->arch.sprg1;
+ regs->sprg2 = vcpu->arch.sprg2;
+ regs->sprg3 = vcpu->arch.sprg3;
+ regs->sprg4 = vcpu->arch.sprg4;
+ regs->sprg5 = vcpu->arch.sprg5;
+ regs->sprg6 = vcpu->arch.sprg6;
+ regs->sprg7 = vcpu->arch.sprg7;
+
+ for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+ regs->gpr[i] = vcpu->arch.gpr[i];
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ vcpu->arch.pc = regs->pc;
+ vcpu->arch.cr = regs->cr;
+ vcpu->arch.ctr = regs->ctr;
+ vcpu->arch.lr = regs->lr;
+ vcpu->arch.xer = regs->xer;
+ vcpu->arch.msr = regs->msr;
+ vcpu->arch.srr0 = regs->srr0;
+ vcpu->arch.srr1 = regs->srr1;
+ vcpu->arch.sprg0 = regs->sprg0;
+ vcpu->arch.sprg1 = regs->sprg1;
+ vcpu->arch.sprg2 = regs->sprg2;
+ vcpu->arch.sprg3 = regs->sprg3;
+ vcpu->arch.sprg4 = regs->sprg4;
+ vcpu->arch.sprg5 = regs->sprg5;
+ vcpu->arch.sprg6 = regs->sprg6;
+ vcpu->arch.sprg7 = regs->sprg7;
+
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
+ vcpu->arch.gpr[i] = regs->gpr[i];
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ struct tlbe *gtlbe;
+ int index;
+ gva_t eaddr;
+ u8 pid;
+ u8 as;
+
+ eaddr = tr->linear_address;
+ pid = (tr->linear_address >> 32) & 0xff;
+ as = (tr->linear_address >> 40) & 0x1;
+
+ index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
+ if (index == -1) {
+ tr->valid = 0;
+ return 0;
+ }
+
+ gtlbe = &vcpu->arch.guest_tlb[index];
+
+ tr->physical_address = tlb_xlate(gtlbe, eaddr);
+ /* XXX what does "writeable" and "usermode" even mean? */
+ tr->valid = 1;
+
+ return 0;
+}
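+
+/* Seen from userspace, the encoding decoded above would be built like
+ * this (hypothetical helper, mirroring the shifts in the ioctl):
+ *
+ *	static u64 kvmppc_44x_encode_xlate(u8 as, u8 pid, u32 eaddr)
+ *	{
+ *		return ((u64)(as & 1) << 40) | ((u64)pid << 32) | eaddr;
+ *	}
+ */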
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c
new file mode 100644
index 00000000000..b480341bc31
--- /dev/null
+++ b/arch/powerpc/kvm/booke_host.c
@@ -0,0 +1,83 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include <asm/kvm_ppc.h>
+
+unsigned long kvmppc_booke_handlers;
+
+static int kvmppc_booke_init(void)
+{
+ unsigned long ivor[16];
+ unsigned long max_ivor = 0;
+ int i;
+
+ /* We install our own exception handlers by hijacking IVPR. IVPR only
+ * holds the upper 16 bits of the vector base, so the handlers need a
+ * naturally aligned 64KB allocation. */
+ kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ VCPU_SIZE_ORDER);
+ if (!kvmppc_booke_handlers)
+ return -ENOMEM;
+
+ /* XXX make sure our handlers are smaller than Linux's */
+
+ /* Copy our interrupt handlers to match host IVORs. That way we don't
+ * have to swap the IVORs on every guest/host transition. */
+ ivor[0] = mfspr(SPRN_IVOR0);
+ ivor[1] = mfspr(SPRN_IVOR1);
+ ivor[2] = mfspr(SPRN_IVOR2);
+ ivor[3] = mfspr(SPRN_IVOR3);
+ ivor[4] = mfspr(SPRN_IVOR4);
+ ivor[5] = mfspr(SPRN_IVOR5);
+ ivor[6] = mfspr(SPRN_IVOR6);
+ ivor[7] = mfspr(SPRN_IVOR7);
+ ivor[8] = mfspr(SPRN_IVOR8);
+ ivor[9] = mfspr(SPRN_IVOR9);
+ ivor[10] = mfspr(SPRN_IVOR10);
+ ivor[11] = mfspr(SPRN_IVOR11);
+ ivor[12] = mfspr(SPRN_IVOR12);
+ ivor[13] = mfspr(SPRN_IVOR13);
+ ivor[14] = mfspr(SPRN_IVOR14);
+ ivor[15] = mfspr(SPRN_IVOR15);
+
+ for (i = 0; i < 16; i++) {
+ if (ivor[i] > max_ivor)
+ max_ivor = ivor[i];
+
+ memcpy((void *)kvmppc_booke_handlers + ivor[i],
+ kvmppc_handlers_start + i * kvmppc_handler_len,
+ kvmppc_handler_len);
+ }
+ flush_icache_range(kvmppc_booke_handlers,
+ kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
+
+ return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+}
+
+static void __exit kvmppc_booke_exit(void)
+{
+ free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
+ kvm_exit();
+}
+
+module_init(kvmppc_booke_init)
+module_exit(kvmppc_booke_exit)
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
new file mode 100644
index 00000000000..3b653b5309b
--- /dev/null
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -0,0 +1,436 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/mmu-44x.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+
+#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
+
+#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
+
+/* The host stack layout: */
+#define HOST_R1 0 /* Implied by stwu. */
+#define HOST_CALLEE_LR 4
+#define HOST_RUN 8
+/* r2 is special: it holds 'current', and it made nonvolatile in the
+ * kernel with the -ffixed-r2 gcc option. */
+#define HOST_R2 12
+#define HOST_NV_GPRS 16
+#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
+#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
+#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
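+
+/* Worked out: HOST_NV_GPR(31) = 16 + (31 - 14) * 4 = 84, so
+ * HOST_MIN_STACK_SIZE = 88 and the 16-byte-aligned HOST_STACK_SIZE = 96.
+ * HOST_STACK_LR is then offset 100, one word above our stwu-created
+ * frame, i.e. the LR save slot in the caller's frame per the 32-bit
+ * PowerPC ABI. */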
+
+#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
+ (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
+ (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
+ (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
+ (1<<BOOKE_INTERRUPT_PROGRAM) | \
+ (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+.macro KVM_HANDLER ivor_nr
+_GLOBAL(kvmppc_handler_\ivor_nr)
+ /* Get pointer to vcpu and record exit number. */
+ mtspr SPRN_SPRG0, r4
+ mfspr r4, SPRN_SPRG1
+ stw r5, VCPU_GPR(r5)(r4)
+ stw r6, VCPU_GPR(r6)(r4)
+ mfctr r5
+ lis r6, kvmppc_resume_host@h
+ stw r5, VCPU_CTR(r4)
+ li r5, \ivor_nr
+ ori r6, r6, kvmppc_resume_host@l
+ mtctr r6
+ bctr
+.endm
+
+_GLOBAL(kvmppc_handlers_start)
+KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
+KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
+KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
+KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
+KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
+KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
+KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
+KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
+KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
+KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
+KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
+KVM_HANDLER BOOKE_INTERRUPT_FIT
+KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
+KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
+KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
+KVM_HANDLER BOOKE_INTERRUPT_DEBUG
+
+_GLOBAL(kvmppc_handler_len)
+ .long kvmppc_handler_1 - kvmppc_handler_0
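+
+/* Computing the length from the first two stubs only works because the
+ * KVM_HANDLER macro expands to the same instruction count every time;
+ * kvmppc_booke_init() relies on that single length when it copies stub
+ * i to offset ivor[i]. */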
+
+
+/* Registers:
+ * SPRG0: guest r4
+ * r4: vcpu pointer
+ * r5: KVM exit number
+ */
+_GLOBAL(kvmppc_resume_host)
+ stw r3, VCPU_GPR(r3)(r4)
+ mfcr r3
+ stw r3, VCPU_CR(r4)
+ stw r7, VCPU_GPR(r7)(r4)
+ stw r8, VCPU_GPR(r8)(r4)
+ stw r9, VCPU_GPR(r9)(r4)
+
+ li r6, 1
+ slw r6, r6, r5
+
+ /* Save the faulting instruction and all GPRs for emulation. */
+ andi. r7, r6, NEED_INST_MASK
+ beq ..skip_inst_copy
+ mfspr r9, SPRN_SRR0
+ mfmsr r8
+ ori r7, r8, MSR_DS
+ mtmsr r7
+ isync
+ lwz r9, 0(r9)
+ mtmsr r8
+ isync
+ stw r9, VCPU_LAST_INST(r4)
+
+ stw r15, VCPU_GPR(r15)(r4)
+ stw r16, VCPU_GPR(r16)(r4)
+ stw r17, VCPU_GPR(r17)(r4)
+ stw r18, VCPU_GPR(r18)(r4)
+ stw r19, VCPU_GPR(r19)(r4)
+ stw r20, VCPU_GPR(r20)(r4)
+ stw r21, VCPU_GPR(r21)(r4)
+ stw r22, VCPU_GPR(r22)(r4)
+ stw r23, VCPU_GPR(r23)(r4)
+ stw r24, VCPU_GPR(r24)(r4)
+ stw r25, VCPU_GPR(r25)(r4)
+ stw r26, VCPU_GPR(r26)(r4)
+ stw r27, VCPU_GPR(r27)(r4)
+ stw r28, VCPU_GPR(r28)(r4)
+ stw r29, VCPU_GPR(r29)(r4)
+ stw r30, VCPU_GPR(r30)(r4)
+ stw r31, VCPU_GPR(r31)(r4)
+..skip_inst_copy:
+
+ /* Also grab DEAR and ESR before the host can clobber them. */
+
+ andi. r7, r6, NEED_DEAR_MASK
+ beq ..skip_dear
+ mfspr r9, SPRN_DEAR
+ stw r9, VCPU_FAULT_DEAR(r4)
+..skip_dear:
+
+ andi. r7, r6, NEED_ESR_MASK
+ beq ..skip_esr
+ mfspr r9, SPRN_ESR
+ stw r9, VCPU_FAULT_ESR(r4)
+..skip_esr:
+
+ /* Save remaining volatile guest register state to vcpu. */
+ stw r0, VCPU_GPR(r0)(r4)
+ stw r1, VCPU_GPR(r1)(r4)
+ stw r2, VCPU_GPR(r2)(r4)
+ stw r10, VCPU_GPR(r10)(r4)
+ stw r11, VCPU_GPR(r11)(r4)
+ stw r12, VCPU_GPR(r12)(r4)
+ stw r13, VCPU_GPR(r13)(r4)
+ stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
+ mflr r3
+ stw r3, VCPU_LR(r4)
+ mfxer r3
+ stw r3, VCPU_XER(r4)
+ mfspr r3, SPRN_SPRG0
+ stw r3, VCPU_GPR(r4)(r4)
+ mfspr r3, SPRN_SRR0
+ stw r3, VCPU_PC(r4)
+
+ /* Restore host stack pointer and PID before IVPR, since the host
+ * exception handlers use them. */
+ lwz r1, VCPU_HOST_STACK(r4)
+ lwz r3, VCPU_HOST_PID(r4)
+ mtspr SPRN_PID, r3
+
+ /* Restore host IVPR before re-enabling interrupts. We cheat and know
+ * that Linux IVPR is always 0xc0000000. */
+ lis r3, 0xc000
+ mtspr SPRN_IVPR, r3
+
+ /* Switch to kernel stack and jump to handler. */
+ LOAD_REG_ADDR(r3, kvmppc_handle_exit)
+ mtctr r3
+ lwz r3, HOST_RUN(r1)
+ lwz r2, HOST_R2(r1)
+ mr r14, r4 /* Save vcpu pointer. */
+
+ bctrl /* kvmppc_handle_exit() */
+
+ /* Restore vcpu pointer and the nonvolatiles we used. */
+ mr r4, r14
+ lwz r14, VCPU_GPR(r14)(r4)
+
+ /* Sometimes instruction emulation must restore complete GPR state. */
+ andi. r5, r3, RESUME_FLAG_NV
+ beq ..skip_nv_load
+ lwz r15, VCPU_GPR(r15)(r4)
+ lwz r16, VCPU_GPR(r16)(r4)
+ lwz r17, VCPU_GPR(r17)(r4)
+ lwz r18, VCPU_GPR(r18)(r4)
+ lwz r19, VCPU_GPR(r19)(r4)
+ lwz r20, VCPU_GPR(r20)(r4)
+ lwz r21, VCPU_GPR(r21)(r4)
+ lwz r22, VCPU_GPR(r22)(r4)
+ lwz r23, VCPU_GPR(r23)(r4)
+ lwz r24, VCPU_GPR(r24)(r4)
+ lwz r25, VCPU_GPR(r25)(r4)
+ lwz r26, VCPU_GPR(r26)(r4)
+ lwz r27, VCPU_GPR(r27)(r4)
+ lwz r28, VCPU_GPR(r28)(r4)
+ lwz r29, VCPU_GPR(r29)(r4)
+ lwz r30, VCPU_GPR(r30)(r4)
+ lwz r31, VCPU_GPR(r31)(r4)
+..skip_nv_load:
+
+ /* Should we return to the guest? */
+ andi. r5, r3, RESUME_FLAG_HOST
+ beq lightweight_exit
+
+ srawi r3, r3, 2 /* Shift -ERR back down. */
+
+heavyweight_exit:
+ /* Not returning to guest. */
+
+ /* We already saved guest volatile register state; now save the
+ * non-volatiles. */
+ stw r15, VCPU_GPR(r15)(r4)
+ stw r16, VCPU_GPR(r16)(r4)
+ stw r17, VCPU_GPR(r17)(r4)
+ stw r18, VCPU_GPR(r18)(r4)
+ stw r19, VCPU_GPR(r19)(r4)
+ stw r20, VCPU_GPR(r20)(r4)
+ stw r21, VCPU_GPR(r21)(r4)
+ stw r22, VCPU_GPR(r22)(r4)
+ stw r23, VCPU_GPR(r23)(r4)
+ stw r24, VCPU_GPR(r24)(r4)
+ stw r25, VCPU_GPR(r25)(r4)
+ stw r26, VCPU_GPR(r26)(r4)
+ stw r27, VCPU_GPR(r27)(r4)
+ stw r28, VCPU_GPR(r28)(r4)
+ stw r29, VCPU_GPR(r29)(r4)
+ stw r30, VCPU_GPR(r30)(r4)
+ stw r31, VCPU_GPR(r31)(r4)
+
+ /* Load host non-volatile register state from host stack. */
+ lwz r14, HOST_NV_GPR(r14)(r1)
+ lwz r15, HOST_NV_GPR(r15)(r1)
+ lwz r16, HOST_NV_GPR(r16)(r1)
+ lwz r17, HOST_NV_GPR(r17)(r1)
+ lwz r18, HOST_NV_GPR(r18)(r1)
+ lwz r19, HOST_NV_GPR(r19)(r1)
+ lwz r20, HOST_NV_GPR(r20)(r1)
+ lwz r21, HOST_NV_GPR(r21)(r1)
+ lwz r22, HOST_NV_GPR(r22)(r1)
+ lwz r23, HOST_NV_GPR(r23)(r1)
+ lwz r24, HOST_NV_GPR(r24)(r1)
+ lwz r25, HOST_NV_GPR(r25)(r1)
+ lwz r26, HOST_NV_GPR(r26)(r1)
+ lwz r27, HOST_NV_GPR(r27)(r1)
+ lwz r28, HOST_NV_GPR(r28)(r1)
+ lwz r29, HOST_NV_GPR(r29)(r1)
+ lwz r30, HOST_NV_GPR(r30)(r1)
+ lwz r31, HOST_NV_GPR(r31)(r1)
+
+ /* Return to kvm_vcpu_run(). */
+ lwz r4, HOST_STACK_LR(r1)
+ addi r1, r1, HOST_STACK_SIZE
+ mtlr r4
+ /* r3 still contains the return code from kvmppc_handle_exit(). */
+ blr
+
+
+/* Registers:
+ * r3: kvm_run pointer
+ * r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcpu_run)
+ stwu r1, -HOST_STACK_SIZE(r1)
+ stw r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
+
+ /* Save host state to stack. */
+ stw r3, HOST_RUN(r1)
+ mflr r3
+ stw r3, HOST_STACK_LR(r1)
+
+ /* Save host non-volatile register state to stack. */
+ stw r14, HOST_NV_GPR(r14)(r1)
+ stw r15, HOST_NV_GPR(r15)(r1)
+ stw r16, HOST_NV_GPR(r16)(r1)
+ stw r17, HOST_NV_GPR(r17)(r1)
+ stw r18, HOST_NV_GPR(r18)(r1)
+ stw r19, HOST_NV_GPR(r19)(r1)
+ stw r20, HOST_NV_GPR(r20)(r1)
+ stw r21, HOST_NV_GPR(r21)(r1)
+ stw r22, HOST_NV_GPR(r22)(r1)
+ stw r23, HOST_NV_GPR(r23)(r1)
+ stw r24, HOST_NV_GPR(r24)(r1)
+ stw r25, HOST_NV_GPR(r25)(r1)
+ stw r26, HOST_NV_GPR(r26)(r1)
+ stw r27, HOST_NV_GPR(r27)(r1)
+ stw r28, HOST_NV_GPR(r28)(r1)
+ stw r29, HOST_NV_GPR(r29)(r1)
+ stw r30, HOST_NV_GPR(r30)(r1)
+ stw r31, HOST_NV_GPR(r31)(r1)
+
+ /* Load guest non-volatiles. */
+ lwz r14, VCPU_GPR(r14)(r4)
+ lwz r15, VCPU_GPR(r15)(r4)
+ lwz r16, VCPU_GPR(r16)(r4)
+ lwz r17, VCPU_GPR(r17)(r4)
+ lwz r18, VCPU_GPR(r18)(r4)
+ lwz r19, VCPU_GPR(r19)(r4)
+ lwz r20, VCPU_GPR(r20)(r4)
+ lwz r21, VCPU_GPR(r21)(r4)
+ lwz r22, VCPU_GPR(r22)(r4)
+ lwz r23, VCPU_GPR(r23)(r4)
+ lwz r24, VCPU_GPR(r24)(r4)
+ lwz r25, VCPU_GPR(r25)(r4)
+ lwz r26, VCPU_GPR(r26)(r4)
+ lwz r27, VCPU_GPR(r27)(r4)
+ lwz r28, VCPU_GPR(r28)(r4)
+ lwz r29, VCPU_GPR(r29)(r4)
+ lwz r30, VCPU_GPR(r30)(r4)
+ lwz r31, VCPU_GPR(r31)(r4)
+
+lightweight_exit:
+ stw r2, HOST_R2(r1)
+
+ mfspr r3, SPRN_PID
+ stw r3, VCPU_HOST_PID(r4)
+ lwz r3, VCPU_PID(r4)
+ mtspr SPRN_PID, r3
+
+ /* Prevent all TLB updates. */
+ mfmsr r5
+ lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
+ ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
+ andc r6, r5, r6
+ mtmsr r6
+
+ /* Save the host's non-pinned TLB mappings, and load the guest mappings
+ * over them. Leave the host's "pinned" kernel mappings in place. */
+ /* XXX optimization: use generation count to avoid swapping unmodified
+ * entries. */
+ mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
+ lis r8, tlb_44x_hwater@ha
+ lwz r8, tlb_44x_hwater@l(r8)
+ addi r3, r4, VCPU_HOST_TLB - 4
+ addi r9, r4, VCPU_SHADOW_TLB - 4
+ li r6, 0
+1:
+ /* Save host entry. */
+ tlbre r7, r6, PPC44x_TLB_PAGEID
+ mfspr r5, SPRN_MMUCR
+ stwu r5, 4(r3)
+ stwu r7, 4(r3)
+ tlbre r7, r6, PPC44x_TLB_XLAT
+ stwu r7, 4(r3)
+ tlbre r7, r6, PPC44x_TLB_ATTRIB
+ stwu r7, 4(r3)
+ /* Load guest entry. */
+ lwzu r7, 4(r9)
+ mtspr SPRN_MMUCR, r7
+ lwzu r7, 4(r9)
+ tlbwe r7, r6, PPC44x_TLB_PAGEID
+ lwzu r7, 4(r9)
+ tlbwe r7, r6, PPC44x_TLB_XLAT
+ lwzu r7, 4(r9)
+ tlbwe r7, r6, PPC44x_TLB_ATTRIB
+ /* Increment index. */
+ addi r6, r6, 1
+ cmpw r6, r8
+ blt 1b
+ mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */
+
+ iccci 0, 0 /* XXX hack */
+
+ /* Load some guest volatiles. */
+ lwz r0, VCPU_GPR(r0)(r4)
+ lwz r2, VCPU_GPR(r2)(r4)
+ lwz r9, VCPU_GPR(r9)(r4)
+ lwz r10, VCPU_GPR(r10)(r4)
+ lwz r11, VCPU_GPR(r11)(r4)
+ lwz r12, VCPU_GPR(r12)(r4)
+ lwz r13, VCPU_GPR(r13)(r4)
+ lwz r3, VCPU_LR(r4)
+ mtlr r3
+ lwz r3, VCPU_XER(r4)
+ mtxer r3
+
+ /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
+ * so how do we make sure vcpu won't fault? */
+ lis r8, kvmppc_booke_handlers@ha
+ lwz r8, kvmppc_booke_handlers@l(r8)
+ mtspr SPRN_IVPR, r8
+
+ /* Save vcpu pointer for the exception handlers. */
+ mtspr SPRN_SPRG1, r4
+
+ /* Can't switch the stack pointer until after IVPR is switched,
+ * because host interrupt handlers would get confused. */
+ lwz r1, VCPU_GPR(r1)(r4)
+
+ /* XXX handle USPRG0 */
+ /* Host interrupt handlers may have clobbered these guest-readable
+ * SPRGs, so we need to reload them here with the guest's values. */
+ lwz r3, VCPU_SPRG4(r4)
+ mtspr SPRN_SPRG4, r3
+ lwz r3, VCPU_SPRG5(r4)
+ mtspr SPRN_SPRG5, r3
+ lwz r3, VCPU_SPRG6(r4)
+ mtspr SPRN_SPRG6, r3
+ lwz r3, VCPU_SPRG7(r4)
+ mtspr SPRN_SPRG7, r3
+
+ /* Finish loading guest volatiles and jump to guest. */
+ lwz r3, VCPU_CTR(r4)
+ mtctr r3
+ lwz r3, VCPU_CR(r4)
+ mtcr r3
+ lwz r5, VCPU_GPR(r5)(r4)
+ lwz r6, VCPU_GPR(r6)(r4)
+ lwz r7, VCPU_GPR(r7)(r4)
+ lwz r8, VCPU_GPR(r8)(r4)
+ lwz r3, VCPU_PC(r4)
+ mtsrr0 r3
+ lwz r3, VCPU_MSR(r4)
+ oris r3, r3, KVMPPC_MSR_MASK@h
+ ori r3, r3, KVMPPC_MSR_MASK@l
+ mtsrr1 r3
+ lwz r3, VCPU_GPR(r3)(r4)
+ lwz r4, VCPU_GPR(r4)(r4)
+ rfi
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
new file mode 100644
index 00000000000..a03fe0c8069
--- /dev/null
+++ b/arch/powerpc/kvm/emulate.c
@@ -0,0 +1,760 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm_host.h>
+
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/time.h>
+#include <asm/byteorder.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+/* Instruction decoding */
+static inline unsigned int get_op(u32 inst)
+{
+ return inst >> 26;
+}
+
+static inline unsigned int get_xop(u32 inst)
+{
+ return (inst >> 1) & 0x3ff;
+}
+
+static inline unsigned int get_sprn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_dcrn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_rt(u32 inst)
+{
+ return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_rs(u32 inst)
+{
+ return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_ra(u32 inst)
+{
+ return (inst >> 16) & 0x1f;
+}
+
+static inline unsigned int get_rb(u32 inst)
+{
+ return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_rc(u32 inst)
+{
+ return inst & 0x1;
+}
+
+static inline unsigned int get_ws(u32 inst)
+{
+ return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_d(u32 inst)
+{
+ return inst & 0xffff;
+}
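+
+/* The 10-bit SPR/DCR number in these instructions is stored as two
+ * swapped 5-bit halves, which the shift-and-mask in get_sprn() undoes.
+ * Worked example for SPRN_SRR0 = 26: the low five bits (26) sit at
+ * instruction bits 16-20 and the high five bits (0) at bits 11-15, so
+ * ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0) recovers 26. */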
+
+static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
+ const struct tlbe *tlbe)
+{
+ gpa_t gpa;
+
+ if (!get_tlb_v(tlbe))
+ return 0;
+
+ /* Does it match current guest AS? */
+ /* XXX what about IS != DS? */
+ if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+ return 0;
+
+ gpa = get_tlb_raddr(tlbe);
+ if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
+ /* Mapping is not for RAM. */
+ return 0;
+
+ return 1;
+}
+
+static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
+{
+ u64 eaddr;
+ u64 raddr;
+ u64 asid;
+ u32 flags;
+ struct tlbe *tlbe;
+ unsigned int ra;
+ unsigned int rs;
+ unsigned int ws;
+ unsigned int index;
+
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ ws = get_ws(inst);
+
+ index = vcpu->arch.gpr[ra];
+ if (index >= PPC44x_TLB_SIZE) {
+ printk("%s: index %d\n", __func__, index);
+ kvmppc_dump_vcpu(vcpu);
+ return EMULATE_FAIL;
+ }
+
+ tlbe = &vcpu->arch.guest_tlb[index];
+
+ /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
+ if (tlbe->word0 & PPC44x_TLB_VALID) {
+ eaddr = get_tlb_eaddr(tlbe);
+ asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+ kvmppc_mmu_invalidate(vcpu, eaddr, asid);
+ }
+
+ switch (ws) {
+ case PPC44x_TLB_PAGEID:
+ tlbe->tid = vcpu->arch.mmucr & 0xff;
+ tlbe->word0 = vcpu->arch.gpr[rs];
+ break;
+
+ case PPC44x_TLB_XLAT:
+ tlbe->word1 = vcpu->arch.gpr[rs];
+ break;
+
+ case PPC44x_TLB_ATTRIB:
+ tlbe->word2 = vcpu->arch.gpr[rs];
+ break;
+
+ default:
+ return EMULATE_FAIL;
+ }
+
+ if (tlbe_is_host_safe(vcpu, tlbe)) {
+ eaddr = get_tlb_eaddr(tlbe);
+ raddr = get_tlb_raddr(tlbe);
+ asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+ flags = tlbe->word2 & 0xffff;
+
+ /* Create a 4KB mapping on the host. If the guest wanted a
+ * large page, only the first 4KB is mapped here and the rest
+ * are mapped on the fly. */
+ kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
+ }
+
+ return EMULATE_DONE;
+}
+
+static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.tcr & TCR_DIE) {
+ /* The decrementer ticks at the same rate as the timebase, so
+ * that's how we convert the guest DEC value to the number of
+ * host ticks. */
+ unsigned long nr_jiffies;
+
+ nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
+ mod_timer(&vcpu->arch.dec_timer,
+ get_jiffies_64() + nr_jiffies);
+ } else {
+ del_timer(&vcpu->arch.dec_timer);
+ }
+}
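+
+/* Worked example of the conversion above (illustrative numbers only,
+ * both factors are board-dependent): with a 400MHz timebase and
+ * HZ = 250, tb_ticks_per_jiffy is 1,600,000, so a guest DEC value of
+ * 8,000,000 arms the host timer about 5 jiffies (20ms) out. */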
+
+static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pc = vcpu->arch.srr0;
+ kvmppc_set_msr(vcpu, vcpu->arch.srr1);
+}
+
+/* XXX to do:
+ * lhax
+ * lhaux
+ * lswx
+ * lswi
+ * stswx
+ * stswi
+ * lha
+ * lhau
+ * lmw
+ * stmw
+ *
+ * XXX is_bigendian should depend on MMU mapping or MSR[LE]
+ */
+int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ u32 inst = vcpu->arch.last_inst;
+ u32 ea;
+ int ra;
+ int rb;
+ int rc;
+ int rs;
+ int rt;
+ int sprn;
+ int dcrn;
+ enum emulation_result emulated = EMULATE_DONE;
+ int advance = 1;
+
+ switch (get_op(inst)) {
+ case 3: /* trap */
+ printk("trap!\n");
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+ advance = 0;
+ break;
+
+ case 19:
+ switch (get_xop(inst)) {
+ case 50: /* rfi */
+ kvmppc_emul_rfi(vcpu);
+ advance = 0;
+ break;
+
+ default:
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+
+ case 31:
+ switch (get_xop(inst)) {
+
+ case 83: /* mfmsr */
+ rt = get_rt(inst);
+ vcpu->arch.gpr[rt] = vcpu->arch.msr;
+ break;
+
+ case 87: /* lbzx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ break;
+
+ case 131: /* wrtee */
+ rs = get_rs(inst);
+ vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+ | (vcpu->arch.gpr[rs] & MSR_EE);
+ break;
+
+ case 146: /* mtmsr */
+ rs = get_rs(inst);
+ kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
+ break;
+
+ case 163: /* wrteei */
+ vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+ | (inst & MSR_EE);
+ break;
+
+ case 215: /* stbx */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 1, 1);
+ break;
+
+ case 247: /* stbux */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 1, 1);
+ vcpu->arch.gpr[ra] = ea;
+ break;
+
+ case 279: /* lhzx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ break;
+
+ case 311: /* lhzux */
+ rt = get_rt(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ vcpu->arch.gpr[ra] = ea;
+ break;
+
+ case 323: /* mfdcr */
+ dcrn = get_dcrn(inst);
+ rt = get_rt(inst);
+
+ /* The guest may access CPR0 registers to determine the timebase
+ * frequency, and it must know the real host frequency because it
+ * can directly access the timebase registers.
+ *
+ * It would be possible to emulate those accesses in userspace,
+ * but userspace can really only figure out the end frequency.
+ * We could decompose that into the factors that compute it, but
+ * that's tricky math, and it's easier to just report the real
+ * CPR0 values.
+ */
+ switch (dcrn) {
+ case DCRN_CPR0_CONFIG_ADDR:
+ vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
+ break;
+ case DCRN_CPR0_CONFIG_DATA:
+ local_irq_disable();
+ mtdcr(DCRN_CPR0_CONFIG_ADDR,
+ vcpu->arch.cpr0_cfgaddr);
+ vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
+ local_irq_enable();
+ break;
+ default:
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = 0;
+ run->dcr.is_write = 0;
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.dcr_needed = 1;
+ emulated = EMULATE_DO_DCR;
+ }
+
+ break;
+
+ case 339: /* mfspr */
+ sprn = get_sprn(inst);
+ rt = get_rt(inst);
+
+ switch (sprn) {
+ case SPRN_SRR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
+ case SPRN_SRR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
+ case SPRN_MMUCR:
+ vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
+ case SPRN_PID:
+ vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
+ case SPRN_IVPR:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
+ case SPRN_CCR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
+ case SPRN_CCR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
+ case SPRN_PVR:
+ vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
+ case SPRN_DEAR:
+ vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
+ case SPRN_ESR:
+ vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
+ case SPRN_DBCR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
+ case SPRN_DBCR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
+
+ /* Note: mftb and TBRL/TBWL are user-accessible, so
+ * the guest can always access the real TB anyways.
+ * In fact, we probably will never see these traps. */
+ case SPRN_TBWL:
+ vcpu->arch.gpr[rt] = mftbl(); break;
+ case SPRN_TBWU:
+ vcpu->arch.gpr[rt] = mftbu(); break;
+
+ case SPRN_SPRG0:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
+ case SPRN_SPRG1:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
+ case SPRN_SPRG2:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
+ case SPRN_SPRG3:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
+ /* Note: SPRG4-7 are user-readable, so we don't get
+ * a trap. */
+
+ case SPRN_IVOR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
+ case SPRN_IVOR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
+ case SPRN_IVOR2:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
+ case SPRN_IVOR3:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
+ case SPRN_IVOR4:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
+ case SPRN_IVOR5:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
+ case SPRN_IVOR6:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
+ case SPRN_IVOR7:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
+ case SPRN_IVOR8:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
+ case SPRN_IVOR9:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
+ case SPRN_IVOR10:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
+ case SPRN_IVOR11:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
+ case SPRN_IVOR12:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
+ case SPRN_IVOR13:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
+ case SPRN_IVOR14:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
+ case SPRN_IVOR15:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;
+
+ default:
+ printk("mfspr: unknown spr %x\n", sprn);
+ vcpu->arch.gpr[rt] = 0;
+ break;
+ }
+ break;
+
+ case 407: /* sthx */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 2, 1);
+ break;
+
+ case 439: /* sthux */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 2, 1);
+ vcpu->arch.gpr[ra] = ea;
+ break;
+
+ case 451: /* mtdcr */
+ dcrn = get_dcrn(inst);
+ rs = get_rs(inst);
+
+ /* emulate some access in kernel */
+ switch (dcrn) {
+ case DCRN_CPR0_CONFIG_ADDR:
+ vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
+ break;
+ default:
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = vcpu->arch.gpr[rs];
+ run->dcr.is_write = 1;
+ vcpu->arch.dcr_needed = 1;
+ emulated = EMULATE_DO_DCR;
+ }
+
+ break;
+
+ case 467: /* mtspr */
+ sprn = get_sprn(inst);
+ rs = get_rs(inst);
+ switch (sprn) {
+ case SPRN_SRR0:
+ vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SRR1:
+ vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_MMUCR:
+ vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
+ case SPRN_PID:
+ vcpu->arch.pid = vcpu->arch.gpr[rs]; break;
+ case SPRN_CCR0:
+ vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_CCR1:
+ vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_DEAR:
+ vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
+ case SPRN_ESR:
+ vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
+ case SPRN_DBCR0:
+ vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_DBCR1:
+ vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
+
+ /* XXX We need to context-switch the timebase for
+ * watchdog and FIT. */
+ case SPRN_TBWL: break;
+ case SPRN_TBWU: break;
+
+ case SPRN_DEC:
+ vcpu->arch.dec = vcpu->arch.gpr[rs];
+ kvmppc_emulate_dec(vcpu);
+ break;
+
+ case SPRN_TSR:
+ vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
+
+ case SPRN_TCR:
+ vcpu->arch.tcr = vcpu->arch.gpr[rs];
+ kvmppc_emulate_dec(vcpu);
+ break;
+
+ case SPRN_SPRG0:
+ vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG1:
+ vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG2:
+ vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG3:
+ vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
+
+ /* Note: SPRG4-7 are user-readable. These values are
+ * loaded into the real SPRGs when resuming the
+ * guest. */
+ case SPRN_SPRG4:
+ vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG5:
+ vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG6:
+ vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG7:
+ vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
+
+ case SPRN_IVPR:
+ vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR0:
+ vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR1:
+ vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR2:
+ vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR3:
+ vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR4:
+ vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR5:
+ vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR6:
+ vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR7:
+ vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR8:
+ vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR9:
+ vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR10:
+ vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR11:
+ vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR12:
+ vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR13:
+ vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR14:
+ vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR15:
+ vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;
+
+ default:
+ printk("mtspr: unknown spr %x\n", sprn);
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+
+ case 470: /* dcbi */
+ /* Do nothing. The guest is performing dcbi because
+ * hardware DMA is not snooped by the dcache, but
+ * emulated DMA either goes through the dcache as
+ * normal writes, or the host kernel has handled dcache
+ * coherence. */
+ break;
+
+ case 534: /* lwbrx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
+ break;
+
+ case 566: /* tlbsync */
+ break;
+
+ case 662: /* stwbrx */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 4, 0);
+ break;
+
+ case 978: /* tlbwe */
+ emulated = kvmppc_emul_tlbwe(vcpu, inst);
+ break;
+
+ case 914: { /* tlbsx */
+ int index;
+ unsigned int as = get_mmucr_sts(vcpu);
+ unsigned int pid = get_mmucr_stid(vcpu);
+
+ rt = get_rt(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+ rc = get_rc(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
+ if (rc) {
+ if (index < 0)
+ vcpu->arch.cr &= ~0x20000000;
+ else
+ vcpu->arch.cr |= 0x20000000;
+ }
+ vcpu->arch.gpr[rt] = index;
+
+ }
+ break;
+
+ case 790: /* lhbrx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
+ break;
+
+ case 918: /* sthbrx */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 2, 0);
+ break;
+
+ case 966: /* iccci */
+ break;
+
+ default:
+ printk("unknown: op %d xop %d\n", get_op(inst),
+ get_xop(inst));
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+
+ case 32: /* lwz */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+ break;
+
+ case 33: /* lwzu */
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 34: /* lbz */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ break;
+
+ case 35: /* lbzu */
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 36: /* stw */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 4, 1);
+ break;
+
+ case 37: /* stwu */
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 4, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 38: /* stb */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 1, 1);
+ break;
+
+ case 39: /* stbu */
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 1, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 40: /* lhz */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ break;
+
+ case 41: /* lhzu */
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 44: /* sth */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 2, 1);
+ break;
+
+ case 45: /* sthu */
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 2, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ default:
+ printk("unknown op %d\n", get_op(inst));
+ emulated = EMULATE_FAIL;
+ break;
+ }
+
+ if (advance)
+ vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+
+ return emulated;
+}
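The switch above relies on decode helpers (get_op(), get_xop(), and the register-field accessors) defined earlier in emulate.c. They amount to plain bit-field extraction from the 32-bit instruction word; a minimal sketch, assuming the standard PowerPC encoding (primary opcode in the top six bits, XO-form extended opcode in bits 21-30, five-bit register fields, Rc in the low bit):

    /* Sketch only: field extraction for a 32-bit PowerPC instruction. */
    static inline unsigned int get_op(u32 inst)  { return inst >> 26; }
    static inline unsigned int get_xop(u32 inst) { return (inst >> 1) & 0x3ff; }
    static inline unsigned int get_rt(u32 inst)  { return (inst >> 21) & 0x1f; }
    static inline unsigned int get_rs(u32 inst)  { return (inst >> 21) & 0x1f; }
    static inline unsigned int get_ra(u32 inst)  { return (inst >> 16) & 0x1f; }
    static inline unsigned int get_rb(u32 inst)  { return (inst >> 11) & 0x1f; }
    static inline unsigned int get_rc(u32 inst)  { return inst & 1; }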
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
new file mode 100644
index 00000000000..bad40bd2d3a
--- /dev/null
+++ b/arch/powerpc/kvm/powerpc.c
@@ -0,0 +1,436 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/cputable.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_ppc.h>
+
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn;
+}
+
+int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
+{
+ /* XXX implement me */
+ return 0;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+{
+ return 1;
+}
+
+
+int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+ int r;
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ /* We must reload nonvolatiles because "update" load/store
+ * instructions modify register state. */
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_HOST_NV;
+ break;
+ case EMULATE_FAIL:
+ /* XXX Deliver Program interrupt to guest. */
+ printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
+ vcpu->arch.last_inst);
+ r = RESUME_HOST;
+ break;
+ default:
+ BUG();
+ }
+
+ return r;
+}
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ int r;
+
+ if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
+ r = 0;
+ else
+ r = -ENOTSUPP;
+
+ *(int *)rtn = r;
+}
+
+struct kvm *kvm_arch_create_vm(void)
+{
+ struct kvm *kvm;
+
+ kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ if (!kvm)
+ return ERR_PTR(-ENOMEM);
+
+ return kvm;
+}
+
+static void kvmppc_free_vcpus(struct kvm *kvm)
+{
+ unsigned int i;
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ if (kvm->vcpus[i]) {
+ kvm_arch_vcpu_free(kvm->vcpus[i]);
+ kvm->vcpus[i] = NULL;
+ }
+ }
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ kvmppc_free_vcpus(kvm);
+ kvm_free_physmem(kvm);
+ kfree(kvm);
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_USER_MEMORY:
+ r = 1;
+ break;
+ default:
+ r = 0;
+ break;
+ }
+ return r;
+}
+
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvm_vcpu *vcpu;
+ int err;
+
+ vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ if (!vcpu) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ return vcpu;
+
+free_vcpu:
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
+out:
+ return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];
+
+ return test_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static void kvmppc_decrementer_func(unsigned long data)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
+ (unsigned long)vcpu);
+
+ return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+void decache_vcpus_on_cpu(int cpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg)
+{
+ return -ENOTSUPP;
+}
+
+static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
+ struct kvm_run *run)
+{
+ u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+ *gpr = run->dcr.data;
+}
+
+static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
+ struct kvm_run *run)
+{
+ u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+
+ if (run->mmio.len > sizeof(*gpr)) {
+ printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
+ return;
+ }
+
+ if (vcpu->arch.mmio_is_bigendian) {
+ switch (run->mmio.len) {
+ case 4: *gpr = *(u32 *)run->mmio.data; break;
+ case 2: *gpr = *(u16 *)run->mmio.data; break;
+ case 1: *gpr = *(u8 *)run->mmio.data; break;
+ }
+ } else {
+ /* Convert BE data from userland back to LE. */
+ switch (run->mmio.len) {
+ case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
+ case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
+ case 1: *gpr = *(u8 *)run->mmio.data; break;
+ }
+ }
+}
+
+int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes, int is_bigendian)
+{
+ if (bytes > sizeof(run->mmio.data)) {
+ printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
+ bytes);
+ }
+
+ run->mmio.phys_addr = vcpu->arch.paddr_accessed;
+ run->mmio.len = bytes;
+ run->mmio.is_write = 0;
+
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.mmio_is_bigendian = is_bigendian;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 0;
+
+ return EMULATE_DO_MMIO;
+}
+
+int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ u32 val, unsigned int bytes, int is_bigendian)
+{
+ void *data = run->mmio.data;
+
+ if (bytes > sizeof(run->mmio.data)) {
+ printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
+ bytes);
+ }
+
+ run->mmio.phys_addr = vcpu->arch.paddr_accessed;
+ run->mmio.len = bytes;
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+
+ /* Store the value in the lowest bytes of 'data'. */
+ if (is_bigendian) {
+ switch (bytes) {
+ case 4: *(u32 *)data = val; break;
+ case 2: *(u16 *)data = val; break;
+ case 1: *(u8 *)data = val; break;
+ }
+ } else {
+ /* Store LE value into 'data'. */
+ switch (bytes) {
+ case 4: st_le32(data, val); break;
+ case 2: st_le16(data, val); break;
+ case 1: *(u8 *)data = val; break;
+ }
+ }
+
+ return EMULATE_DO_MMIO;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ int r;
+ sigset_t sigsaved;
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+ if (vcpu->mmio_needed) {
+ if (!vcpu->mmio_is_write)
+ kvmppc_complete_mmio_load(vcpu, run);
+ vcpu->mmio_needed = 0;
+ } else if (vcpu->arch.dcr_needed) {
+ if (!vcpu->arch.dcr_is_write)
+ kvmppc_complete_dcr_load(vcpu, run);
+ vcpu->arch.dcr_needed = 0;
+ }
+
+ kvmppc_check_and_deliver_interrupts(vcpu);
+
+ local_irq_disable();
+ kvm_guest_enter();
+ r = __kvmppc_vcpu_run(run, vcpu);
+ kvm_guest_exit();
+ local_irq_enable();
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+ return r;
+}
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
+{
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ long r;
+
+ switch (ioctl) {
+ case KVM_INTERRUPT: {
+ struct kvm_interrupt irq;
+ r = -EFAULT;
+ if (copy_from_user(&irq, argp, sizeof(irq)))
+ goto out;
+ r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+
+out:
+ return r;
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return -ENOTSUPP;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ long r;
+
+ switch (ioctl) {
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+ return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
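kvmppc_handle_load() and kvmppc_handle_store() above only describe the access in kvm_run and return EMULATE_DO_MMIO; userspace performs the device access, and a pending load is completed by kvmppc_complete_mmio_load() on the next KVM_RUN. A minimal userspace sketch of that round trip, assuming vcpu_fd and the mmap'd run structure come from the usual /dev/kvm setup (omitted here):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch only: drive one vcpu and satisfy guest MMIO reads with
     * zeroes in place of a real device model. */
    static int run_vcpu(int vcpu_fd, struct kvm_run *run)
    {
            for (;;) {
                    if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                            return -1;
                    if (run->exit_reason == KVM_EXIT_MMIO &&
                        !run->mmio.is_write) {
                            /* On re-entry the kernel copies mmio.data
                             * into the gpr saved in io_gpr. */
                            memset(run->mmio.data, 0, run->mmio.len);
                            continue;
                    }
                    return run->exit_reason; /* other exits not handled */
            }
    }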
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f6a68e178fc..8f5f02160ff 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -62,6 +62,10 @@ config GENERIC_LOCKBREAK
default y
depends on SMP && PREEMPT
+config PGSTE
+ bool
+ default y if KVM
+
mainmenu "Linux Kernel Configuration"
config S390
@@ -69,6 +73,7 @@ config S390
select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_KRETPROBES
+ select HAVE_KVM if 64BIT
source "init/Kconfig"
@@ -515,6 +520,13 @@ config ZFCPDUMP
Select this option if you want to build a zfcpdump enabled kernel.
Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
+config S390_GUEST
+bool "s390 guest support (EXPERIMENTAL)"
+ depends on 64BIT && EXPERIMENTAL
+ select VIRTIO
+ select VIRTIO_RING
+ help
+ Select this option if you want to run the kernel as a guest under an s390 Linux host.
endmenu
source "net/Kconfig"
@@ -536,3 +548,5 @@ source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
+
+source "arch/s390/kvm/Kconfig"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index f708be367b0..792a4e7743c 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -87,7 +87,7 @@ LDFLAGS_vmlinux := -e start
head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
- arch/s390/appldata/ arch/s390/hypfs/
+ arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 540a67f979b..68ec4083bf7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -144,6 +144,10 @@ static noinline __init void detect_machine_type(void)
/* Running on a P/390 ? */
if (cpuinfo->cpu_id.machine == 0x7490)
machine_flags |= 4;
+
+ /* Running under KVM ? */
+ if (cpuinfo->cpu_id.version == 0xfe)
+ machine_flags |= 64;
}
#ifdef CONFIG_64BIT
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7141147e6b6..a9d18aafa5f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -316,7 +316,11 @@ static int __init early_parse_ipldelay(char *p)
early_param("ipldelay", early_parse_ipldelay);
#ifdef CONFIG_S390_SWITCH_AMODE
+#ifdef CONFIG_PGSTE
+unsigned int switch_amode = 1;
+#else
unsigned int switch_amode = 0;
+#endif
EXPORT_SYMBOL_GPL(switch_amode);
static void set_amode_and_uaccess(unsigned long user_amode,
@@ -797,9 +801,13 @@ setup_arch(char **cmdline_p)
"This machine has an IEEE fpu\n" :
"This machine has no IEEE fpu\n");
#else /* CONFIG_64BIT */
- printk((MACHINE_IS_VM) ?
- "We are running under VM (64 bit mode)\n" :
- "We are running native (64 bit mode)\n");
+ if (MACHINE_IS_VM)
+ printk("We are running under VM (64 bit mode)\n");
+ else if (MACHINE_IS_KVM) {
+ printk("We are running under KVM (64 bit mode)\n");
+ add_preferred_console("ttyS", 1, NULL);
+ } else
+ printk("We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */
/* Save unparsed command line copy for /proc/cmdline */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c5f05b3fb2c..ca90ee3f930 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -110,6 +110,7 @@ void account_system_vtime(struct task_struct *tsk)
S390_lowcore.steal_clock -= cputime << 12;
account_system_time(tsk, 0, cputime);
}
+EXPORT_SYMBOL_GPL(account_system_vtime);
static inline void set_vtimer(__u64 expires)
{
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
new file mode 100644
index 00000000000..1761b74d639
--- /dev/null
+++ b/arch/s390/kvm/Kconfig
@@ -0,0 +1,46 @@
+#
+# KVM configuration
+#
+config HAVE_KVM
+ bool
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ default y
+ ---help---
+ Say Y here to see options for using your Linux host to run other
+ operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ tristate "Kernel-based Virtual Machine (KVM) support"
+ depends on HAVE_KVM && EXPERIMENTAL
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ select S390_SWITCH_AMODE
+ select PREEMPT
+ ---help---
+ Support hosting paravirtualized guest machines using the SIE
+ virtualization capability on the mainframe. This should work
+ on any 64-bit machine.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ To compile this as a module, choose M here: the module
+ will be called kvm.
+
+ If unsure, say N.
+
+config KVM_TRACE
+ bool
+
+# OK, it's a little counter-intuitive to do this, but it puts it neatly under
+# the virtualization menu.
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
new file mode 100644
index 00000000000..e5221ec0b8e
--- /dev/null
+++ b/arch/s390/kvm/Makefile
@@ -0,0 +1,14 @@
+# Makefile for kernel virtual machines on s390
+#
+# Copyright IBM Corp. 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (version 2 only)
+# as published by the Free Software Foundation.
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/s390/kvm
+
+kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o
+obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
new file mode 100644
index 00000000000..f639a152869
--- /dev/null
+++ b/arch/s390/kvm/diag.c
@@ -0,0 +1,67 @@
+/*
+ * diag.c - handling diagnose instructions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include "kvm-s390.h"
+
+static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
+{
+ VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
+ vcpu->stat.diagnose_44++;
+ vcpu_put(vcpu);
+ schedule();
+ vcpu_load(vcpu);
+ return 0;
+}
+
+static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
+{
+ unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
+ unsigned long subcode = vcpu->arch.guest_gprs[reg] & 0xffff;
+
+ VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
+ switch (subcode) {
+ case 3:
+ vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
+ break;
+ case 4:
+ vcpu->run->s390_reset_flags = 0;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
+ vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
+ vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
+ vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
+ VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx",
+ vcpu->run->s390_reset_flags);
+ return -EREMOTE;
+}
+
+int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
+{
+ int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
+
+ switch (code) {
+ case 0x44:
+ return __diag_time_slice_end(vcpu);
+ case 0x308:
+ return __diag_ipl_functions(vcpu);
+ default:
+ return -ENOTSUPP;
+ }
+}
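The 0x44 subcode handled above is the guest's voluntary time-slice yield; an s390 Linux guest typically issues it from cpu_relax() when diagnose 0x44 is available. A guest-side sketch:

    /* Guest-side sketch: give up the current time slice.  The host
     * lands in __diag_time_slice_end() above and calls schedule(). */
    static inline void guest_yield(void)
    {
            asm volatile("diag 0,0,0x44" : : : "memory");
    }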
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
new file mode 100644
index 00000000000..4e0633c413f
--- /dev/null
+++ b/arch/s390/kvm/gaccess.h
@@ -0,0 +1,274 @@
+/*
+ * gaccess.h - access guest memory
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+#ifndef __KVM_S390_GACCESS_H
+#define __KVM_S390_GACCESS_H
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/uaccess.h>
+
+static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
+ u64 guestaddr)
+{
+ u64 prefix = vcpu->arch.sie_block->prefix;
+ u64 origin = vcpu->kvm->arch.guest_origin;
+ u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+ if (guestaddr < 2 * PAGE_SIZE)
+ guestaddr += prefix;
+ else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
+ guestaddr -= prefix;
+
+ if (guestaddr > memsize)
+ return (void __user __force *) ERR_PTR(-EFAULT);
+
+ guestaddr += origin;
+
+ return (void __user *) guestaddr;
+}
+
+static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u64 *result)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 7);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return get_user(*result, (u64 __user *) uptr);
+}
+
+static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u32 *result)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 3);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return get_user(*result, (u32 __user *) uptr);
+}
+
+static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u16 *result)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 1);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return get_user(*result, (u16 __user *) uptr);
+}
+
+static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u8 *result)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return get_user(*result, (u8 __user *) uptr);
+}
+
+static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u64 value)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 7);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return put_user(value, (u64 __user *) uptr);
+}
+
+static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u32 value)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 3);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return put_user(value, (u32 __user *) uptr);
+}
+
+static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u16 value)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ BUG_ON(guestaddr & 1);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return put_user(value, (u16 __user *) uptr);
+}
+
+static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+ u8 value)
+{
+ void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ return put_user(value, (u8 __user *) uptr);
+}
+
+
+static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
+ const void *from, unsigned long n)
+{
+ int rc;
+ unsigned long i;
+ const u8 *data = from;
+
+ for (i = 0; i < n; i++) {
+ rc = put_guest_u8(vcpu, guestdest++, *(data++));
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
+ const void *from, unsigned long n)
+{
+ u64 prefix = vcpu->arch.sie_block->prefix;
+ u64 origin = vcpu->kvm->arch.guest_origin;
+ u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+ if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
+ goto slowpath;
+
+ if ((guestdest < prefix) && (guestdest + n > prefix))
+ goto slowpath;
+
+ if ((guestdest < prefix + 2 * PAGE_SIZE)
+ && (guestdest + n > prefix + 2 * PAGE_SIZE))
+ goto slowpath;
+
+ if (guestdest < 2 * PAGE_SIZE)
+ guestdest += prefix;
+ else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
+ guestdest -= prefix;
+
+ if (guestdest + n > memsize)
+ return -EFAULT;
+
+ if (guestdest + n < guestdest)
+ return -EFAULT;
+
+ guestdest += origin;
+
+ return copy_to_user((void __user *) guestdest, from, n);
+slowpath:
+ return __copy_to_guest_slow(vcpu, guestdest, from, n);
+}
+
+static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
+ u64 guestsrc, unsigned long n)
+{
+ int rc;
+ unsigned long i;
+ u8 *data = to;
+
+ for (i = 0; i < n; i++) {
+ rc = get_guest_u8(vcpu, guestsrc++, data++);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
+ u64 guestsrc, unsigned long n)
+{
+ u64 prefix = vcpu->arch.sie_block->prefix;
+ u64 origin = vcpu->kvm->arch.guest_origin;
+ u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+ if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
+ goto slowpath;
+
+ if ((guestsrc < prefix) && (guestsrc + n > prefix))
+ goto slowpath;
+
+ if ((guestsrc < prefix + 2 * PAGE_SIZE)
+ && (guestsrc + n > prefix + 2 * PAGE_SIZE))
+ goto slowpath;
+
+ if (guestsrc < 2 * PAGE_SIZE)
+ guestsrc += prefix;
+ else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
+ guestsrc -= prefix;
+
+ if (guestsrc + n > memsize)
+ return -EFAULT;
+
+ if (guestsrc + n < guestsrc)
+ return -EFAULT;
+
+ guestsrc += origin;
+
+ return copy_from_user(to, (void __user *) guestsrc, n);
+slowpath:
+ return __copy_from_guest_slow(vcpu, to, guestsrc, n);
+}
+
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
+ const void *from, unsigned long n)
+{
+ u64 origin = vcpu->kvm->arch.guest_origin;
+ u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+ if (guestdest + n > memsize)
+ return -EFAULT;
+
+ if (guestdest + n < guestdest)
+ return -EFAULT;
+
+ guestdest += origin;
+
+ return copy_to_user((void __user *) guestdest, from, n);
+}
+
+static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
+ u64 guestsrc, unsigned long n)
+{
+ u64 origin = vcpu->kvm->arch.guest_origin;
+ u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+ if (guestsrc + n > memsize)
+ return -EFAULT;
+
+ if (guestsrc + n < guestsrc)
+ return -EFAULT;
+
+ guestsrc += origin;
+
+ return copy_from_user(to, (void __user *) guestsrc, n);
+}
+#endif
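The prefix arithmetic in __guestaddr_to_user() implements s390 low-core prefixing: the guest's first two pages (real addresses 0 to 8K) and the two pages at the prefix address trade places, giving each cpu a private low core. A short worked example of the accessors above, assuming a prefix of 0x20000:

    /* Sketch: with vcpu->arch.sie_block->prefix == 0x20000,
     *   guest 0x00008 -> 0x20008 within guest memory (prefixed up)
     *   guest 0x20008 -> 0x00008 within guest memory (prefixed down)
     *   guest 0x50000 -> 0x50000 (untouched)
     * and each result is offset by guest_origin before the uaccess. */
    static int read_lowcore_dword(struct kvm_vcpu *vcpu, u64 *val)
    {
            return get_guest_u64(vcpu, 0x8, val);
    }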
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
new file mode 100644
index 00000000000..349581a2610
--- /dev/null
+++ b/arch/s390/kvm/intercept.c
@@ -0,0 +1,216 @@
+/*
+ * intercept.c - in-kernel handling for sie intercepts
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/errno.h>
+#include <linux/pagemap.h>
+
+#include <asm/kvm_host.h>
+
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+static int handle_lctg(struct kvm_vcpu *vcpu)
+{
+ int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+ int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+ ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
+ u64 useraddr;
+ int reg, rc;
+
+ vcpu->stat.instruction_lctg++;
+ if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
+ return -ENOTSUPP;
+
+ useraddr = disp2;
+ if (base2)
+ useraddr += vcpu->arch.guest_gprs[base2];
+
+ reg = reg1;
+
+ VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
+ disp2);
+
+ do {
+ rc = get_guest_u64(vcpu, useraddr,
+ &vcpu->arch.sie_block->gcr[reg]);
+ if (rc == -EFAULT) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ break;
+ }
+ useraddr += 8;
+ if (reg == reg3)
+ break;
+ reg = (reg + 1) % 16;
+ } while (1);
+ return 0;
+}
+
+static int handle_lctl(struct kvm_vcpu *vcpu)
+{
+ int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+ int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 useraddr;
+ u32 val = 0;
+ int reg, rc;
+
+ vcpu->stat.instruction_lctl++;
+
+ useraddr = disp2;
+ if (base2)
+ useraddr += vcpu->arch.guest_gprs[base2];
+
+ VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
+ disp2);
+
+ reg = reg1;
+ do {
+ rc = get_guest_u32(vcpu, useraddr, &val);
+ if (rc == -EFAULT) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ break;
+ }
+ vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
+ vcpu->arch.sie_block->gcr[reg] |= val;
+ useraddr += 4;
+ if (reg == reg3)
+ break;
+ reg = (reg + 1) % 16;
+ } while (1);
+ return 0;
+}
+
+static intercept_handler_t instruction_handlers[256] = {
+ [0x83] = kvm_s390_handle_diag,
+ [0xae] = kvm_s390_handle_sigp,
+ [0xb2] = kvm_s390_handle_priv,
+ [0xb7] = handle_lctl,
+ [0xeb] = handle_lctg,
+};
+
+static int handle_noop(struct kvm_vcpu *vcpu)
+{
+ switch (vcpu->arch.sie_block->icptcode) {
+ case 0x10:
+ vcpu->stat.exit_external_request++;
+ break;
+ case 0x14:
+ vcpu->stat.exit_external_interrupt++;
+ break;
+ default:
+ break; /* nothing */
+ }
+ return 0;
+}
+
+static int handle_stop(struct kvm_vcpu *vcpu)
+{
+ int rc;
+
+ vcpu->stat.exit_stop_request++;
+ atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ spin_lock_bh(&vcpu->arch.local_int.lock);
+ if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
+ vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
+ rc = __kvm_s390_vcpu_store_status(vcpu,
+ KVM_S390_STORE_STATUS_NOADDR);
+ if (rc >= 0)
+ rc = -ENOTSUPP;
+ }
+
+ if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
+ vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
+ VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
+ rc = -ENOTSUPP;
+ } else
+ rc = 0;
+ spin_unlock_bh(&vcpu->arch.local_int.lock);
+ return rc;
+}
+
+static int handle_validity(struct kvm_vcpu *vcpu)
+{
+ int viwhy = vcpu->arch.sie_block->ipb >> 16;
+ vcpu->stat.exit_validity++;
+ if (viwhy == 0x37) {
+ fault_in_pages_writeable((char __user *)
+ vcpu->kvm->arch.guest_origin +
+ vcpu->arch.sie_block->prefix,
+ PAGE_SIZE);
+ return 0;
+ }
+ VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
+ viwhy);
+ return -ENOTSUPP;
+}
+
+static int handle_instruction(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ vcpu->stat.exit_instruction++;
+ handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
+ if (handler)
+ return handler(vcpu);
+ return -ENOTSUPP;
+}
+
+static int handle_prog(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.exit_program_interruption++;
+ return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
+}
+
+static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
+{
+ int rc, rc2;
+
+ vcpu->stat.exit_instr_and_program++;
+ rc = handle_instruction(vcpu);
+ rc2 = handle_prog(vcpu);
+
+ if (rc == -ENOTSUPP)
+ vcpu->arch.sie_block->icptcode = 0x04;
+ if (rc)
+ return rc;
+ return rc2;
+}
+
+static const intercept_handler_t intercept_funcs[0x48 >> 2] = {
+ [0x00 >> 2] = handle_noop,
+ [0x04 >> 2] = handle_instruction,
+ [0x08 >> 2] = handle_prog,
+ [0x0C >> 2] = handle_instruction_and_prog,
+ [0x10 >> 2] = handle_noop,
+ [0x14 >> 2] = handle_noop,
+ [0x1C >> 2] = kvm_s390_handle_wait,
+ [0x20 >> 2] = handle_validity,
+ [0x28 >> 2] = handle_stop,
+};
+
+int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t func;
+ u8 code = vcpu->arch.sie_block->icptcode;
+
+ if (code & 3 || code >= 0x48)
+ return -ENOTSUPP;
+ func = intercept_funcs[code >> 2];
+ if (func)
+ return func(vcpu);
+ return -ENOTSUPP;
+}
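Both tables above exploit the SIE encodings: intercept codes are multiples of four below 0x48, so intercept_funcs[] is indexed by code >> 2, while instruction intercepts key on the first opcode byte (ipa >> 8). A sketch of the handler type the tables are built from, which is assumed to be declared in kvm-s390.h:

    /* Sketch: every entry shares this signature.  For example an
     * icptcode of 0x08 (program interruption) passes the checks in
     * kvm_handle_sie_intercept(), indexes slot 0x08 >> 2 == 2 and
     * lands on handle_prog() above. */
    typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);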
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
new file mode 100644
index 00000000000..fcd1ed8015c
--- /dev/null
+++ b/arch/s390/kvm/interrupt.c
@@ -0,0 +1,592 @@
+/*
+ * interrupt.c - handling kvm guest interrupts
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+#include <asm/lowcore.h>
+#include <asm/uaccess.h>
+#include <linux/kvm_host.h>
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+static int psw_extint_disabled(struct kvm_vcpu *vcpu)
+{
+ return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
+}
+
+static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
+{
+ if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
+ (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
+ (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
+ return 0;
+ return 1;
+}
+
+static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
+ struct interrupt_info *inti)
+{
+ switch (inti->type) {
+ case KVM_S390_INT_EMERGENCY:
+ if (psw_extint_disabled(vcpu))
+ return 0;
+ if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
+ return 1;
+ return 0;
+ case KVM_S390_INT_SERVICE:
+ if (psw_extint_disabled(vcpu))
+ return 0;
+ if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
+ return 1;
+ return 0;
+ case KVM_S390_INT_VIRTIO:
+ if (psw_extint_disabled(vcpu))
+ return 0;
+ if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
+ return 1;
+ return 0;
+ case KVM_S390_PROGRAM_INT:
+ case KVM_S390_SIGP_STOP:
+ case KVM_S390_SIGP_SET_PREFIX:
+ case KVM_S390_RESTART:
+ return 1;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+static void __set_cpu_idle(struct kvm_vcpu *vcpu)
+{
+ BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
+ atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+ set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+}
+
+static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
+{
+ BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
+ atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+ clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+}
+
+static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
+{
+ atomic_clear_mask(CPUSTAT_ECALL_PEND |
+ CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+ &vcpu->arch.sie_block->cpuflags);
+ vcpu->arch.sie_block->lctl = 0x0000;
+}
+
+static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
+{
+ atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+}
+
+static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
+ struct interrupt_info *inti)
+{
+ switch (inti->type) {
+ case KVM_S390_INT_EMERGENCY:
+ case KVM_S390_INT_SERVICE:
+ case KVM_S390_INT_VIRTIO:
+ if (psw_extint_disabled(vcpu))
+ __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
+ else
+ vcpu->arch.sie_block->lctl |= LCTL_CR0;
+ break;
+ case KVM_S390_SIGP_STOP:
+ __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
+ struct interrupt_info *inti)
+{
+ const unsigned short table[] = { 2, 4, 4, 6 };
+ int rc, exception = 0;
+
+ switch (inti->type) {
+ case KVM_S390_INT_EMERGENCY:
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+ vcpu->stat.deliver_emergency_signal++;
+ rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ case KVM_S390_INT_SERVICE:
+ VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+ inti->ext.ext_params);
+ vcpu->stat.deliver_service_signal++;
+ rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ case KVM_S390_INT_VIRTIO:
+ VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
+ inti->ext.ext_params, inti->ext.ext_params2);
+ vcpu->stat.deliver_virtio_interrupt++;
+ rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
+ inti->ext.ext_params2);
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ case KVM_S390_SIGP_STOP:
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
+ vcpu->stat.deliver_stop_signal++;
+ __set_intercept_indicator(vcpu, inti);
+ break;
+
+ case KVM_S390_SIGP_SET_PREFIX:
+ VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
+ inti->prefix.address);
+ vcpu->stat.deliver_prefix_signal++;
+ vcpu->arch.sie_block->prefix = inti->prefix.address;
+ vcpu->arch.sie_block->ihcpu = 0xffff;
+ break;
+
+ case KVM_S390_RESTART:
+ VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+ vcpu->stat.deliver_restart_signal++;
+ rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
+ restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ case KVM_S390_PROGRAM_INT:
+ VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+ inti->pgm.code,
+ table[vcpu->arch.sie_block->ipa >> 14]);
+ vcpu->stat.deliver_program_int++;
+ rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = put_guest_u16(vcpu, __LC_PGM_ILC,
+ table[vcpu->arch.sie_block->ipa >> 14]);
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_PGM_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ break;
+
+ default:
+ BUG();
+ }
+
+ if (exception) {
+ VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
+ " interrupt");
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ if (inti->type == KVM_S390_PROGRAM_INT) {
+ printk(KERN_WARNING "kvm: recursive program check\n");
+ BUG();
+ }
+ }
+}
+
+static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
+{
+ int rc, exception = 0;
+
+ if (psw_extint_disabled(vcpu))
+ return 0;
+ if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+ return 0;
+ rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
+ if (rc == -EFAULT)
+ exception = 1;
+ rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+ rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+ __LC_EXT_NEW_PSW, sizeof(psw_t));
+ if (rc == -EFAULT)
+ exception = 1;
+
+ if (exception) {
+ VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \
+ " ckc interrupt");
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ return 0;
+ }
+
+ return 1;
+}
+
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct local_interrupt *li = &vcpu->arch.local_int;
+ struct float_interrupt *fi = vcpu->arch.local_int.float_int;
+ struct interrupt_info *inti;
+ int rc = 0;
+
+ if (atomic_read(&li->active)) {
+ spin_lock_bh(&li->lock);
+ list_for_each_entry(inti, &li->list, list)
+ if (__interrupt_is_deliverable(vcpu, inti)) {
+ rc = 1;
+ break;
+ }
+ spin_unlock_bh(&li->lock);
+ }
+
+ if ((!rc) && atomic_read(&fi->active)) {
+ spin_lock_bh(&fi->lock);
+ list_for_each_entry(inti, &fi->list, list)
+ if (__interrupt_is_deliverable(vcpu, inti)) {
+ rc = 1;
+ break;
+ }
+ spin_unlock_bh(&fi->lock);
+ }
+
+ if ((!rc) && (vcpu->arch.sie_block->ckc <
+ get_clock() + vcpu->arch.sie_block->epoch)) {
+ if ((!psw_extint_disabled(vcpu)) &&
+ (vcpu->arch.sie_block->gcr[0] & 0x800ul))
+ rc = 1;
+ }
+
+ return rc;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
+{
+ u64 now, sltime;
+ DECLARE_WAITQUEUE(wait, current);
+
+ vcpu->stat.exit_wait_state++;
+ if (kvm_cpu_has_interrupt(vcpu))
+ return 0;
+
+ if (psw_interrupts_disabled(vcpu)) {
+ VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
+ __unset_cpu_idle(vcpu);
+ return -ENOTSUPP; /* disabled wait */
+ }
+
+ if (psw_extint_disabled(vcpu) ||
+ (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
+ VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
+ goto no_timer;
+ }
+
+ now = get_clock() + vcpu->arch.sie_block->epoch;
+ if (vcpu->arch.sie_block->ckc < now) {
+ __unset_cpu_idle(vcpu);
+ return 0;
+ }
+
+ sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;
+
+ vcpu->arch.ckc_timer.expires = jiffies + sltime;
+
+ add_timer(&vcpu->arch.ckc_timer);
+ VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
+no_timer:
+ spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+ spin_lock_bh(&vcpu->arch.local_int.lock);
+ __set_cpu_idle(vcpu);
+ vcpu->arch.local_int.timer_due = 0;
+ add_wait_queue(&vcpu->arch.local_int.wq, &wait);
+ while (list_empty(&vcpu->arch.local_int.list) &&
+ list_empty(&vcpu->arch.local_int.float_int->list) &&
+ (!vcpu->arch.local_int.timer_due) &&
+ !signal_pending(current)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&vcpu->arch.local_int.lock);
+ spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+ vcpu_put(vcpu);
+ schedule();
+ vcpu_load(vcpu);
+ spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+ spin_lock_bh(&vcpu->arch.local_int.lock);
+ }
+ __unset_cpu_idle(vcpu);
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
+ spin_unlock_bh(&vcpu->arch.local_int.lock);
+ spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+ del_timer(&vcpu->arch.ckc_timer);
+ return 0;
+}
+
+void kvm_s390_idle_wakeup(unsigned long data)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+ spin_lock_bh(&vcpu->arch.local_int.lock);
+ vcpu->arch.local_int.timer_due = 1;
+ if (waitqueue_active(&vcpu->arch.local_int.wq))
+ wake_up_interruptible(&vcpu->arch.local_int.wq);
+ spin_unlock_bh(&vcpu->arch.local_int.lock);
+}
+
+
+void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
+{
+ struct local_interrupt *li = &vcpu->arch.local_int;
+ struct float_interrupt *fi = vcpu->arch.local_int.float_int;
+ struct interrupt_info *n, *inti = NULL;
+ int deliver;
+
+ __reset_intercept_indicators(vcpu);
+ if (atomic_read(&li->active)) {
+ do {
+ deliver = 0;
+ spin_lock_bh(&li->lock);
+ list_for_each_entry_safe(inti, n, &li->list, list) {
+ if (__interrupt_is_deliverable(vcpu, inti)) {
+ list_del(&inti->list);
+ deliver = 1;
+ break;
+ }
+ __set_intercept_indicator(vcpu, inti);
+ }
+ if (list_empty(&li->list))
+ atomic_set(&li->active, 0);
+ spin_unlock_bh(&li->lock);
+ if (deliver) {
+ __do_deliver_interrupt(vcpu, inti);
+ kfree(inti);
+ }
+ } while (deliver);
+ }
+
+ if ((vcpu->arch.sie_block->ckc <
+ get_clock() + vcpu->arch.sie_block->epoch))
+ __try_deliver_ckc_interrupt(vcpu);
+
+ if (atomic_read(&fi->active)) {
+ do {
+ deliver = 0;
+ spin_lock_bh(&fi->lock);
+ list_for_each_entry_safe(inti, n, &fi->list, list) {
+ if (__interrupt_is_deliverable(vcpu, inti)) {
+ list_del(&inti->list);
+ deliver = 1;
+ break;
+ }
+ __set_intercept_indicator(vcpu, inti);
+ }
+ if (list_empty(&fi->list))
+ atomic_set(&fi->active, 0);
+ spin_unlock_bh(&fi->lock);
+ if (deliver) {
+ __do_deliver_interrupt(vcpu, inti);
+ kfree(inti);
+ }
+ } while (deliver);
+ }
+}
+
+int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+{
+ struct local_interrupt *li = &vcpu->arch.local_int;
+ struct interrupt_info *inti;
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return -ENOMEM;
+
+ inti->type = KVM_S390_PROGRAM_INT;
+ inti->pgm.code = code;
+
+ VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
+ spin_lock_bh(&li->lock);
+ list_add(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+ BUG_ON(waitqueue_active(&li->wq));
+ spin_unlock_bh(&li->lock);
+ return 0;
+}
+
+int kvm_s390_inject_vm(struct kvm *kvm,
+ struct kvm_s390_interrupt *s390int)
+{
+ struct local_interrupt *li;
+ struct float_interrupt *fi;
+ struct interrupt_info *inti;
+ int sigcpu;
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return -ENOMEM;
+
+ switch (s390int->type) {
+ case KVM_S390_INT_VIRTIO:
+ VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
+ s390int->parm, s390int->parm64);
+ inti->type = s390int->type;
+ inti->ext.ext_params = s390int->parm;
+ inti->ext.ext_params2 = s390int->parm64;
+ break;
+ case KVM_S390_INT_SERVICE:
+ VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
+ inti->type = s390int->type;
+ inti->ext.ext_params = s390int->parm;
+ break;
+ case KVM_S390_PROGRAM_INT:
+ case KVM_S390_SIGP_STOP:
+ case KVM_S390_INT_EMERGENCY:
+ default:
+ kfree(inti);
+ return -EINVAL;
+ }
+
+ mutex_lock(&kvm->lock);
+ fi = &kvm->arch.float_int;
+ spin_lock_bh(&fi->lock);
+ list_add_tail(&inti->list, &fi->list);
+ atomic_set(&fi->active, 1);
+ sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
+ if (sigcpu == KVM_MAX_VCPUS) {
+ do {
+ sigcpu = fi->next_rr_cpu++;
+ if (sigcpu == KVM_MAX_VCPUS)
+ sigcpu = fi->next_rr_cpu = 0;
+ } while (fi->local_int[sigcpu] == NULL);
+ }
+ li = fi->local_int[sigcpu];
+ spin_lock_bh(&li->lock);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ if (waitqueue_active(&li->wq))
+ wake_up_interruptible(&li->wq);
+ spin_unlock_bh(&li->lock);
+ spin_unlock_bh(&fi->lock);
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
+
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt *s390int)
+{
+ struct local_interrupt *li;
+ struct interrupt_info *inti;
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return -ENOMEM;
+
+ switch (s390int->type) {
+ case KVM_S390_PROGRAM_INT:
+ if (s390int->parm & 0xffff0000) {
+ kfree(inti);
+ return -EINVAL;
+ }
+ inti->type = s390int->type;
+ inti->pgm.code = s390int->parm;
+ VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
+ s390int->parm);
+ break;
+ case KVM_S390_SIGP_STOP:
+ case KVM_S390_RESTART:
+ case KVM_S390_SIGP_SET_PREFIX:
+ case KVM_S390_INT_EMERGENCY:
+ VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
+ inti->type = s390int->type;
+ break;
+ case KVM_S390_INT_VIRTIO:
+ case KVM_S390_INT_SERVICE:
+ default:
+ kfree(inti);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vcpu->kvm->lock);
+ li = &vcpu->arch.local_int;
+ spin_lock_bh(&li->lock);
+ if (inti->type == KVM_S390_PROGRAM_INT)
+ list_add(&inti->list, &li->list);
+ else
+ list_add_tail(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+ if (inti->type == KVM_S390_SIGP_STOP)
+ li->action_bits |= ACTION_STOP_ON_STOP;
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ if (waitqueue_active(&li->wq))
+ wake_up_interruptible(&vcpu->arch.local_int.wq);
+ spin_unlock_bh(&li->lock);
+ mutex_unlock(&vcpu->kvm->lock);
+ return 0;
+}
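Userspace feeds these queues through the KVM_S390_INTERRUPT ioctl: issued on the vm fd it reaches kvm_s390_inject_vm() (floating interrupts, see kvm_arch_vm_ioctl below), and on a vcpu fd it is expected to route to kvm_s390_inject_vcpu(). A userspace sketch of the vm-fd path:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch only: queue a floating service-signal interrupt on a
     * vm fd obtained from KVM_CREATE_VM; error handling trimmed. */
    static int inject_service_int(int vm_fd, __u32 parm)
    {
            struct kvm_s390_interrupt irq = {
                    .type = KVM_S390_INT_SERVICE,
                    .parm = parm,
            };

            return ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
    }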
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
new file mode 100644
index 00000000000..98d1e73e01f
--- /dev/null
+++ b/arch/s390/kvm/kvm-s390.c
@@ -0,0 +1,685 @@
+/*
+ * kvm-s390.c -- hosting zSeries kernel virtual machines
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <asm/lowcore.h>
+#include <asm/pgtable.h>
+
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { "userspace_handled", VCPU_STAT(exit_userspace) },
+ { "exit_validity", VCPU_STAT(exit_validity) },
+ { "exit_stop_request", VCPU_STAT(exit_stop_request) },
+ { "exit_external_request", VCPU_STAT(exit_external_request) },
+ { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
+ { "exit_instruction", VCPU_STAT(exit_instruction) },
+ { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
+ { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
+ { "instruction_lctg", VCPU_STAT(instruction_lctg) },
+ { "instruction_lctl", VCPU_STAT(instruction_lctl) },
+ { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
+ { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
+ { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
+ { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
+ { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
+ { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
+ { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+ { "exit_wait_state", VCPU_STAT(exit_wait_state) },
+ { "instruction_stidp", VCPU_STAT(instruction_stidp) },
+ { "instruction_spx", VCPU_STAT(instruction_spx) },
+ { "instruction_stpx", VCPU_STAT(instruction_stpx) },
+ { "instruction_stap", VCPU_STAT(instruction_stap) },
+ { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
+ { "instruction_stsch", VCPU_STAT(instruction_stsch) },
+ { "instruction_chsc", VCPU_STAT(instruction_chsc) },
+ { "instruction_stsi", VCPU_STAT(instruction_stsi) },
+ { "instruction_stfl", VCPU_STAT(instruction_stfl) },
+ { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
+ { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
+ { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
+ { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
+ { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
+ { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
+ { "diagnose_44", VCPU_STAT(diagnose_44) },
+ { NULL }
+};
+
+
+/* Section: not file related */
+void kvm_arch_hardware_enable(void *garbage)
+{
+ /* every s390 is virtualization enabled ;-) */
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+void decache_vcpus_on_cpu(int cpu)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+}
+
+int kvm_arch_init(void *opaque)
+{
+ return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
+
+/* Section: device related */
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ if (ioctl == KVM_S390_ENABLE_SIE)
+ return s390_enable_sie();
+ return -EINVAL;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+ return 0;
+}
+
+/* Section: vm related */
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ return 0;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm *kvm = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ int r;
+
+ switch (ioctl) {
+ case KVM_S390_INTERRUPT: {
+ struct kvm_s390_interrupt s390int;
+
+ r = -EFAULT;
+ if (copy_from_user(&s390int, argp, sizeof(s390int)))
+ break;
+ r = kvm_s390_inject_vm(kvm, &s390int);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+struct kvm *kvm_arch_create_vm(void)
+{
+ struct kvm *kvm;
+ int rc;
+ char debug_name[16];
+
+ rc = s390_enable_sie();
+ if (rc)
+ goto out_nokvm;
+
+ rc = -ENOMEM;
+ kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ if (!kvm)
+ goto out_nokvm;
+
+ kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
+ if (!kvm->arch.sca)
+ goto out_nosca;
+
+ sprintf(debug_name, "kvm-%u", current->pid);
+
+ kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
+ if (!kvm->arch.dbf)
+ goto out_nodbf;
+
+ spin_lock_init(&kvm->arch.float_int.lock);
+ INIT_LIST_HEAD(&kvm->arch.float_int.list);
+
+ debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
+ VM_EVENT(kvm, 3, "%s", "vm created");
+
+ try_module_get(THIS_MODULE);
+
+ return kvm;
+out_nodbf:
+ free_page((unsigned long)(kvm->arch.sca));
+out_nosca:
+ kfree(kvm);
+out_nokvm:
+ return ERR_PTR(rc);
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ debug_unregister(kvm->arch.dbf);
+ free_page((unsigned long)(kvm->arch.sca));
+ kfree(kvm);
+ module_put(THIS_MODULE);
+}
+
+/* Section: vcpu related */
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ /* kvm common code refers to this, but doesn't call it */
+ BUG();
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ save_fp_regs(&vcpu->arch.host_fpregs);
+ save_access_regs(vcpu->arch.host_acrs);
+ vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
+ restore_fp_regs(&vcpu->arch.guest_fpregs);
+ restore_access_regs(vcpu->arch.guest_acrs);
+
+ if (signal_pending(current))
+ atomic_set_mask(CPUSTAT_STOP_INT,
+ &vcpu->arch.sie_block->cpuflags);
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ save_fp_regs(&vcpu->arch.guest_fpregs);
+ save_access_regs(vcpu->arch.guest_acrs);
+ restore_fp_regs(&vcpu->arch.host_fpregs);
+ restore_access_regs(vcpu->arch.host_acrs);
+}
+
+static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
+{
+ /* this equals the initial cpu reset in the POP, but we don't switch to ESA */
+ vcpu->arch.sie_block->gpsw.mask = 0UL;
+ vcpu->arch.sie_block->gpsw.addr = 0UL;
+ vcpu->arch.sie_block->prefix = 0UL;
+ vcpu->arch.sie_block->ihcpu = 0xffff;
+ vcpu->arch.sie_block->cputm = 0UL;
+ vcpu->arch.sie_block->ckc = 0UL;
+ vcpu->arch.sie_block->todpr = 0;
+ memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
+ vcpu->arch.sie_block->gcr[0] = 0xE0UL;
+ vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
+ vcpu->arch.guest_fpregs.fpc = 0;
+ asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
+ vcpu->arch.sie_block->gbea = 1;
+}
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
+ vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
+ vcpu->arch.sie_block->gmsor = 0x000000000000;
+ vcpu->arch.sie_block->ecb = 2;
+ vcpu->arch.sie_block->eca = 0xC1002001U;
+ setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
+ (unsigned long) vcpu);
+ get_cpu_id(&vcpu->arch.cpu_id);
+ vcpu->arch.cpu_id.version = 0xfe;
+ return 0;
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ unsigned int id)
+{
+ struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+ int rc = -ENOMEM;
+
+ if (!vcpu)
+ goto out_nomem;
+
+ vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);
+
+ if (!vcpu->arch.sie_block)
+ goto out_free_cpu;
+
+ vcpu->arch.sie_block->icpua = id;
+ BUG_ON(!kvm->arch.sca);
+ BUG_ON(kvm->arch.sca->cpu[id].sda);
+ kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+ vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
+ vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
+
+ spin_lock_init(&vcpu->arch.local_int.lock);
+ INIT_LIST_HEAD(&vcpu->arch.local_int.list);
+ vcpu->arch.local_int.float_int = &kvm->arch.float_int;
+ spin_lock_bh(&kvm->arch.float_int.lock);
+ kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
+ init_waitqueue_head(&vcpu->arch.local_int.wq);
+ vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+ spin_unlock_bh(&kvm->arch.float_int.lock);
+
+ rc = kvm_vcpu_init(vcpu, kvm, id);
+ if (rc)
+ goto out_free_cpu;
+ VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
+ vcpu->arch.sie_block);
+
+ try_module_get(THIS_MODULE);
+
+ return vcpu;
+out_free_cpu:
+ kfree(vcpu);
+out_nomem:
+ return ERR_PTR(rc);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
+ free_page((unsigned long)(vcpu->arch.sie_block));
+ kfree(vcpu);
+ module_put(THIS_MODULE);
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+ /* kvm common code refers to this, but never calls it */
+ BUG();
+ return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
+{
+ vcpu_load(vcpu);
+ kvm_s390_vcpu_initial_reset(vcpu);
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ vcpu_load(vcpu);
+ memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ vcpu_load(vcpu);
+ memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ vcpu_load(vcpu);
+ memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
+ memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ vcpu_load(vcpu);
+ memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
+ memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ vcpu_load(vcpu);
+ memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+ vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ vcpu_load(vcpu);
+ memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
+ fpu->fpc = vcpu->arch.guest_fpregs.fpc;
+ vcpu_put(vcpu);
+ return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
+{
+ int rc = 0;
+
+ vcpu_load(vcpu);
+ if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
+ rc = -EBUSY;
+ else
+ vcpu->arch.sie_block->gpsw = psw;
+ vcpu_put(vcpu);
+ return rc;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ return -EINVAL; /* not implemented yet */
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg)
+{
+ return -EINVAL; /* not implemented yet */
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL; /* not implemented yet */
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL; /* not implemented yet */
+}
+
+static void __vcpu_run(struct kvm_vcpu *vcpu)
+{
+ memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
+
+ if (need_resched())
+ schedule();
+
+ vcpu->arch.sie_block->icptcode = 0;
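+ /* kvm_guest_enter/exit run with interrupts disabled, presumably so
+ * that the guest cpu-time accounting cannot race with an interrupt
+ * arriving on this cpu */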
+ local_irq_disable();
+ kvm_guest_enter();
+ local_irq_enable();
+ VCPU_EVENT(vcpu, 6, "entering sie flags %x",
+ atomic_read(&vcpu->arch.sie_block->cpuflags));
+ sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+ VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+ vcpu->arch.sie_block->icptcode);
+ local_irq_disable();
+ kvm_guest_exit();
+ local_irq_enable();
+
+ memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ int rc;
+ sigset_t sigsaved;
+
+ vcpu_load(vcpu);
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+ atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+
+ BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
+
+ switch (kvm_run->exit_reason) {
+ case KVM_EXIT_S390_SIEIC:
+ vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
+ vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
+ break;
+ case KVM_EXIT_UNKNOWN:
+ case KVM_EXIT_S390_RESET:
+ break;
+ default:
+ BUG();
+ }
+
+ might_sleep();
+
+ do {
+ kvm_s390_deliver_pending_interrupts(vcpu);
+ __vcpu_run(vcpu);
+ rc = kvm_handle_sie_intercept(vcpu);
+ } while (!signal_pending(current) && !rc);
+
+ if (signal_pending(current) && !rc)
+ rc = -EINTR;
+
+ if (rc == -ENOTSUPP) {
+ /* intercept cannot be handled in-kernel, prepare kvm_run */
+ kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
+ kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
+ kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
+ kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
+ kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
+ kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
+ rc = 0;
+ }
+
+ if (rc == -EREMOTE) {
+ /* intercept was handled, but userspace support is needed;
+ * kvm_run has been prepared by the handler */
+ rc = 0;
+ }
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+ vcpu_put(vcpu);
+
+ vcpu->stat.exit_userspace++;
+ return rc;
+}
+
+static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
+ unsigned long n, int prefix)
+{
+ if (prefix)
+ return copy_to_guest(vcpu, guestdest, from, n);
+ else
+ return copy_to_guest_absolute(vcpu, guestdest, from, n);
+}
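+
+/* prefixed copies go through the guest's prefix redirection (the 8k
+ * block at guest real 0 is exchanged with the 8k block at the prefix
+ * address), while absolute copies bypass it; whether the save area
+ * honours prefixing depends on the store status variant, hence the
+ * prefix flag threaded through the callers below */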
+
+/*
+ * store status at address
+ * we have two special cases:
+ * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
+ * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
+ */
+int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ const unsigned char archmode = 1;
+ int prefix;
+
+ if (addr == KVM_S390_STORE_STATUS_NOADDR) {
+ if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
+ return -EFAULT;
+ addr = SAVE_AREA_BASE;
+ prefix = 0;
+ } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
+ if (copy_to_guest(vcpu, 163ul, &archmode, 1))
+ return -EFAULT;
+ addr = SAVE_AREA_BASE;
+ prefix = 1;
+ } else
+ prefix = 0;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
+ vcpu->arch.guest_fpregs.fprs, 128, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
+ vcpu->arch.guest_gprs, 128, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
+ &vcpu->arch.sie_block->gpsw, 16, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
+ &vcpu->arch.sie_block->prefix, 4, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu,
+ addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
+ &vcpu->arch.guest_fpregs.fpc, 4, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
+ &vcpu->arch.sie_block->todpr, 4, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
+ &vcpu->arch.sie_block->cputm, 8, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
+ &vcpu->arch.sie_block->ckc, 8, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
+ &vcpu->arch.guest_acrs, 64, prefix))
+ return -EFAULT;
+
+ if (__guestcopy(vcpu,
+ addr + offsetof(struct save_area_s390x, ctrl_regs),
+ &vcpu->arch.sie_block->gcr, 128, prefix))
+ return -EFAULT;
+ return 0;
+}
+
+static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ int rc;
+
+ vcpu_load(vcpu);
+ rc = __kvm_s390_vcpu_store_status(vcpu, addr);
+ vcpu_put(vcpu);
+ return rc;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+
+ switch (ioctl) {
+ case KVM_S390_INTERRUPT: {
+ struct kvm_s390_interrupt s390int;
+
+ if (copy_from_user(&s390int, argp, sizeof(s390int)))
+ return -EFAULT;
+ return kvm_s390_inject_vcpu(vcpu, &s390int);
+ }
+ case KVM_S390_STORE_STATUS:
+ return kvm_s390_vcpu_store_status(vcpu, arg);
+ case KVM_S390_SET_INITIAL_PSW: {
+ psw_t psw;
+
+ if (copy_from_user(&psw, argp, sizeof(psw)))
+ return -EFAULT;
+ return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
+ }
+ case KVM_S390_INITIAL_RESET:
+ return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+ default:
+ ;
+ }
+ return -EINVAL;
+}
+
+/* Section: memory related */
+int kvm_arch_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ /* A few sanity checks: we can have exactly one memory slot, which
+ has to start at guest physical address zero, begin at a page
+ boundary in userland, and end at a page boundary. The userland
+ memory may be fragmented across several vmas, and it is fine to
+ mmap() and munmap() ranges within this slot at any time after
+ this call. */
+
+ if (mem->slot)
+ return -EINVAL;
+
+ if (mem->guest_phys_addr)
+ return -EINVAL;
+
+ if (mem->userspace_addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ if (mem->memory_size & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ kvm->arch.guest_origin = mem->userspace_addr;
+ kvm->arch.guest_memsize = mem->memory_size;
+
+ /* FIXME: we do want to interrupt running CPUs and update their memory
+ configuration now to avoid race conditions. But hey, changing the
+ memory layout while virtual CPUs are running is usually bad
+ programming practice. */
+
+ return 0;
+}
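+
+/*
+ * A hypothetical userspace sequence satisfying the checks above (all
+ * names here are illustrative, not part of this patch):
+ *
+ *	void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ *	struct kvm_userspace_memory_region mem = {
+ *		.slot = 0,
+ *		.guest_phys_addr = 0,
+ *		.userspace_addr = (__u64) addr,
+ *		.memory_size = size,
+ *	};
+ *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
+ */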
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn;
+}
+
+static int __init kvm_s390_init(void)
+{
+ return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+}
+
+static void __exit kvm_s390_exit(void)
+{
+ kvm_exit();
+}
+
+module_init(kvm_s390_init);
+module_exit(kvm_s390_exit);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
new file mode 100644
index 00000000000..3893cf12eac
--- /dev/null
+++ b/arch/s390/kvm/kvm-s390.h
@@ -0,0 +1,64 @@
+/*
+ * kvm_s390.h - definitions for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef ARCH_S390_KVM_S390_H
+#define ARCH_S390_KVM_S390_H
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+
+typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
+
+int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
+
+#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
+do { \
+ debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
+ d_args); \
+} while (0)
+
+#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
+do { \
+ debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
+ "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
+ d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
+ d_args); \
+} while (0)
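+
+/*
+ * Usage mirrors the call sites elsewhere in this patch, e.g.:
+ *	VM_EVENT(kvm, 3, "%s", "vm created");
+ *	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
+ * Both log through the per-vm s390 debug feature registered in
+ * kvm_arch_create_vm().
+ */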
+
+static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
+{
+ return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT;
+}
+
+int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
+void kvm_s390_idle_wakeup(unsigned long data);
+void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
+int kvm_s390_inject_vm(struct kvm *kvm,
+ struct kvm_s390_interrupt *s390int);
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+ struct kvm_s390_interrupt *s390int);
+int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+
+/* implemented in priv.c */
+int kvm_s390_handle_priv(struct kvm_vcpu *vcpu);
+
+/* implemented in sigp.c */
+int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
+
+/* implemented in kvm-s390.c */
+int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
+ unsigned long addr);
+/* implemented in diag.c */
+int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
+
+#endif
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
new file mode 100644
index 00000000000..1465946325c
--- /dev/null
+++ b/arch/s390/kvm/priv.c
@@ -0,0 +1,323 @@
+/*
+ * priv.c - handling privileged instructions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm.h>
+#include <linux/errno.h>
+#include <asm/current.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <asm/sysinfo.h>
+#include "gaccess.h"
+#include "kvm-s390.h"
+
+static int handle_set_prefix(struct kvm_vcpu *vcpu)
+{
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 operand2;
+ u32 address = 0;
+ u8 tmp;
+
+ vcpu->stat.instruction_spx++;
+
+ operand2 = disp2;
+ if (base2)
+ operand2 += vcpu->arch.guest_gprs[base2];
+
+ /* must be word boundary */
+ if (operand2 & 3) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ /* get the value */
+ if (get_guest_u32(vcpu, operand2, &address)) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ address = address & 0x7fffe000u;
+
+ /* make sure that the new value is valid memory */
+ if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
+ (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ vcpu->arch.sie_block->prefix = address;
+ vcpu->arch.sie_block->ihcpu = 0xffff;
+
+ VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
+out:
+ return 0;
+}
+
+static int handle_store_prefix(struct kvm_vcpu *vcpu)
+{
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 operand2;
+ u32 address;
+
+ vcpu->stat.instruction_stpx++;
+ operand2 = disp2;
+ if (base2)
+ operand2 += vcpu->arch.guest_gprs[base2];
+
+ /* must be word boundary */
+ if (operand2 & 3) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ address = vcpu->arch.sie_block->prefix;
+ address = address & 0x7fffe000u;
+
+ /* store the value */
+ if (put_guest_u32(vcpu, operand2, address)) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
+out:
+ return 0;
+}
+
+static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
+{
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 useraddr;
+ int rc;
+
+ vcpu->stat.instruction_stap++;
+ useraddr = disp2;
+ if (base2)
+ useraddr += vcpu->arch.guest_gprs[base2];
+
+ if (useraddr & 1) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
+ if (rc == -EFAULT) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr);
+out:
+ return 0;
+}
+
+static int handle_skey(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.instruction_storage_key++;
+ vcpu->arch.sie_block->gpsw.addr -= 4;
+ VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
+ return 0;
+}
+
+static int handle_stsch(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.instruction_stsch++;
+ VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
+ /* condition code 3 */
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+ return 0;
+}
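+
+/* the condition code lives in bits 18 and 19 of the 64-bit PSW mask,
+ * i.e. bits 44 and 45 counted from the least significant end, hence
+ * the 3ul << 44 shifts used above and below */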
+
+static int handle_chsc(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.instruction_chsc++;
+ VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
+ /* condition code 3 */
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+ return 0;
+}
+
+static unsigned int kvm_stfl(void)
+{
+ asm volatile(
+ " .insn s,0xb2b10000,0(0)\n" /* stfl */
+ "0:\n"
+ EX_TABLE(0b, 0b));
+ return S390_lowcore.stfl_fac_list;
+}
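+
+/* stfl is emitted via .insn so that older assemblers can build this;
+ * the EX_TABLE entry presumably tolerates an operation exception on
+ * machines without the facility, leaving the lowcore field untouched */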
+
+static int handle_stfl(struct kvm_vcpu *vcpu)
+{
+ unsigned int facility_list = kvm_stfl();
+ int rc;
+
+ vcpu->stat.instruction_stfl++;
+ facility_list &= ~(1UL<<24); /* no stfle */
+
+ rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
+ &facility_list, sizeof(facility_list));
+ if (rc == -EFAULT)
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ else
+ VCPU_EVENT(vcpu, 5, "store facility list value %x",
+ facility_list);
+ return 0;
+}
+
+static int handle_stidp(struct kvm_vcpu *vcpu)
+{
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 operand2;
+ int rc;
+
+ vcpu->stat.instruction_stidp++;
+ operand2 = disp2;
+ if (base2)
+ operand2 += vcpu->arch.guest_gprs[base2];
+
+ if (operand2 & 7) {
+ kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ goto out;
+ }
+
+ rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
+ if (rc == -EFAULT) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out;
+ }
+
+ VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
+out:
+ return 0;
+}
+
+static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
+{
+ struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ int cpus = 0;
+ int n;
+
+ spin_lock_bh(&fi->lock);
+ for (n = 0; n < KVM_MAX_VCPUS; n++)
+ if (fi->local_int[n])
+ cpus++;
+ spin_unlock_bh(&fi->lock);
+
+ /* deal with other level 3 hypervisors */
+ if (stsi(mem, 3, 2, 2) == -ENOSYS)
+ mem->count = 0;
+ if (mem->count < 8)
+ mem->count++;
+ for (n = mem->count - 1; n > 0 ; n--)
+ memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+
+ mem->vm[0].cpus_total = cpus;
+ mem->vm[0].cpus_configured = cpus;
+ mem->vm[0].cpus_standby = 0;
+ mem->vm[0].cpus_reserved = 0;
+ mem->vm[0].caf = 1000;
+ memcpy(mem->vm[0].name, "KVMguest", 8);
+ ASCEBC(mem->vm[0].name, 8);
+ memcpy(mem->vm[0].cpi, "KVM/Linux ", 16);
+ ASCEBC(mem->vm[0].cpi, 16);
+}
+
+static int handle_stsi(struct kvm_vcpu *vcpu)
+{
+ int fc = (vcpu->arch.guest_gprs[0] & 0xf0000000) >> 28;
+ int sel1 = vcpu->arch.guest_gprs[0] & 0xff;
+ int sel2 = vcpu->arch.guest_gprs[1] & 0xffff;
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u64 operand2;
+ unsigned long mem;
+
+ vcpu->stat.instruction_stsi++;
+ VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
+
+ operand2 = disp2;
+ if (base2)
+ operand2 += vcpu->arch.guest_gprs[base2];
+
+ if (operand2 & 0xfff && fc > 0)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ switch (fc) {
+ case 0:
+ vcpu->arch.guest_gprs[0] = 3 << 28;
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ return 0;
+ case 1: /* same handling for 1 and 2 */
+ case 2:
+ mem = get_zeroed_page(GFP_KERNEL);
+ if (!mem)
+ goto out_fail;
+ if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS)
+ goto out_mem;
+ break;
+ case 3:
+ if (sel1 != 2 || sel2 != 2)
+ goto out_fail;
+ mem = get_zeroed_page(GFP_KERNEL);
+ if (!mem)
+ goto out_fail;
+ handle_stsi_3_2_2(vcpu, (void *) mem);
+ break;
+ default:
+ goto out_fail;
+ }
+
+ if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ goto out_mem;
+ }
+ free_page(mem);
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ vcpu->arch.guest_gprs[0] = 0;
+ return 0;
+out_mem:
+ free_page(mem);
+out_fail:
+ /* condition code 3 */
+ vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
+ return 0;
+}
+
+static intercept_handler_t priv_handlers[256] = {
+ [0x02] = handle_stidp,
+ [0x10] = handle_set_prefix,
+ [0x11] = handle_store_prefix,
+ [0x12] = handle_store_cpu_address,
+ [0x29] = handle_skey,
+ [0x2a] = handle_skey,
+ [0x2b] = handle_skey,
+ [0x34] = handle_stsch,
+ [0x5f] = handle_chsc,
+ [0x7d] = handle_stsi,
+ [0xb1] = handle_stfl,
+};
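+
+/* dispatch on the second opcode byte of the 0xb2xx instruction (the
+ * low byte of ipa); opcodes without a handler are pushed to userspace
+ * via -ENOTSUPP */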
+
+int kvm_s390_handle_priv(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+ if (handler)
+ return handler(vcpu);
+ return -ENOTSUPP;
+}
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
new file mode 100644
index 00000000000..934fd6a885f
--- /dev/null
+++ b/arch/s390/kvm/sie64a.S
@@ -0,0 +1,47 @@
+/*
+ * sie64a.S - low level sie call
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <asm/asm-offsets.h>
+
+SP_R5 = 5 * 8 # offset into stackframe
+SP_R6 = 6 * 8
+
+/*
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
+ */
+ .globl sie64a
+sie64a:
+ lgr %r5,%r3
+ stmg %r5,%r14,SP_R5(%r15) # save register on entry
+ lgr %r14,%r2 # pointer to sie control block
+ lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+sie_inst:
+ sie 0(%r14)
+ lg %r14,SP_R5(%r15)
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lghi %r2,0
+ lmg %r6,%r14,SP_R6(%r15)
+ br %r14
+
+sie_err:
+ lg %r14,SP_R5(%r15)
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lghi %r2,-EFAULT
+ lmg %r6,%r14,SP_R6(%r15)
+ br %r14
+
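+# The exception table entry below pairs sie_inst with sie_err, so a
+# fault raised by the sie instruction itself makes sie64a return
+# -EFAULT to its caller instead of oopsing the host.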
+ .section __ex_table,"a"
+ .quad sie_inst,sie_err
+ .previous
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
new file mode 100644
index 00000000000..0a236acfb5f
--- /dev/null
+++ b/arch/s390/kvm/sigp.c
@@ -0,0 +1,288 @@
+/*
+ * sigp.c - handling interprocessor communication
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include "gaccess.h"
+#include "kvm-s390.h"
+
+/* sigp order codes */
+#define SIGP_SENSE 0x01
+#define SIGP_EXTERNAL_CALL 0x02
+#define SIGP_EMERGENCY 0x03
+#define SIGP_START 0x04
+#define SIGP_STOP 0x05
+#define SIGP_RESTART 0x06
+#define SIGP_STOP_STORE_STATUS 0x09
+#define SIGP_INITIAL_CPU_RESET 0x0b
+#define SIGP_CPU_RESET 0x0c
+#define SIGP_SET_PREFIX 0x0d
+#define SIGP_STORE_STATUS_ADDR 0x0e
+#define SIGP_SET_ARCH 0x12
+
+/* cpu status bits */
+#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
+#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
+#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
+#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
+#define SIGP_STAT_STOPPED 0x00000040UL
+#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
+#define SIGP_STAT_CHECK_STOP 0x00000010UL
+#define SIGP_STAT_INOPERATIVE 0x00000004UL
+#define SIGP_STAT_INVALID_ORDER 0x00000002UL
+#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
+
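+/*
+ * The __sigp_* helpers below return the architected condition code
+ * (0 order accepted, 1 status stored, 2 busy, 3 not operational) or a
+ * negative errno; kvm_s390_handle_sigp() folds non-negative values
+ * into the guest PSW condition code.
+ */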
+
+static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
+{
+ struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ int rc;
+
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return 3; /* not operational */
+
+ spin_lock_bh(&fi->lock);
+ if (fi->local_int[cpu_addr] == NULL)
+ rc = 3; /* not operational */
+ else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
+ & CPUSTAT_RUNNING) {
+ *reg &= 0xffffffff00000000UL;
+ rc = 1; /* status stored */
+ } else {
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STAT_STOPPED;
+ rc = 1; /* status stored */
+ }
+ spin_unlock_bh(&fi->lock);
+
+ VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
+ return rc;
+}
+
+static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
+{
+ struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct local_interrupt *li;
+ struct interrupt_info *inti;
+ int rc;
+
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return 3; /* not operational */
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return -ENOMEM;
+
+ inti->type = KVM_S390_INT_EMERGENCY;
+
+ spin_lock_bh(&fi->lock);
+ li = fi->local_int[cpu_addr];
+ if (li == NULL) {
+ rc = 3; /* not operational */
+ kfree(inti);
+ goto unlock;
+ }
+ spin_lock_bh(&li->lock);
+ list_add_tail(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+ if (waitqueue_active(&li->wq))
+ wake_up_interruptible(&li->wq);
+ spin_unlock_bh(&li->lock);
+ rc = 0; /* order accepted */
+unlock:
+ spin_unlock_bh(&fi->lock);
+ VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
+ return rc;
+}
+
+static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
+{
+ struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct local_interrupt *li;
+ struct interrupt_info *inti;
+ int rc;
+
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return 3; /* not operational */
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return -ENOMEM;
+
+ inti->type = KVM_S390_SIGP_STOP;
+
+ spin_lock_bh(&fi->lock);
+ li = fi->local_int[cpu_addr];
+ if (li == NULL) {
+ rc = 3; /* not operational */
+ kfree(inti);
+ goto unlock;
+ }
+ spin_lock_bh(&li->lock);
+ list_add_tail(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+ atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+ if (store)
+ li->action_bits |= ACTION_STORE_ON_STOP;
+ li->action_bits |= ACTION_STOP_ON_STOP;
+ if (waitqueue_active(&li->wq))
+ wake_up_interruptible(&li->wq);
+ spin_unlock_bh(&li->lock);
+ rc = 0; /* order accepted */
+unlock:
+ spin_unlock_bh(&fi->lock);
+ VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
+ return rc;
+}
+
+static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
+{
+ int rc;
+
+ switch (parameter & 0xff) {
+ case 0:
+ printk(KERN_WARNING "kvm: request to switch to ESA/390 mode"
+ " not supported");
+ rc = 3; /* not operational */
+ break;
+ case 1:
+ case 2:
+ rc = 0; /* order accepted */
+ break;
+ default:
+ rc = -ENOTSUPP;
+ }
+ return rc;
+}
+
+static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
+ u64 *reg)
+{
+ struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct local_interrupt *li;
+ struct interrupt_info *inti;
+ int rc;
+ u8 tmp;
+
+ /* make sure that the new value is valid memory */
+ address = address & 0x7fffe000u;
+ if ((copy_from_guest(vcpu, &tmp,
+ (u64) (address + vcpu->kvm->arch.guest_origin) , 1)) ||
+ (copy_from_guest(vcpu, &tmp, (u64) (address +
+ vcpu->kvm->arch.guest_origin + PAGE_SIZE), 1))) {
+ *reg |= SIGP_STAT_INVALID_PARAMETER;
+ return 1; /* invalid parameter */
+ }
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+ return 2; /* busy */
+
+ spin_lock_bh(&fi->lock);
+ li = fi->local_int[cpu_addr];
+
+ if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
+ rc = 1; /* incorrect state */
+ *reg &= SIGP_STAT_INCORRECT_STATE;
+ kfree(inti);
+ goto out_fi;
+ }
+
+ spin_lock_bh(&li->lock);
+ /* cpu must be in stopped state */
+ if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+ rc = 1; /* incorrect state */
+ *reg &= SIGP_STAT_INCORRECT_STATE;
+ kfree(inti);
+ goto out_li;
+ }
+
+ inti->type = KVM_S390_SIGP_SET_PREFIX;
+ inti->prefix.address = address;
+
+ list_add_tail(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+ if (waitqueue_active(&li->wq))
+ wake_up_interruptible(&li->wq);
+ rc = 0; /* order accepted */
+
+ VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
+out_li:
+ spin_unlock_bh(&li->lock);
+out_fi:
+ spin_unlock_bh(&fi->lock);
+ return rc;
+}
+
+int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
+{
+ int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+ int r3 = vcpu->arch.sie_block->ipa & 0x000f;
+ int base2 = vcpu->arch.sie_block->ipb >> 28;
+ int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+ u32 parameter;
+ u16 cpu_addr = vcpu->arch.guest_gprs[r3];
+ u8 order_code;
+ int rc;
+
+ order_code = disp2;
+ if (base2)
+ order_code += vcpu->arch.guest_gprs[base2];
+
+ if (r1 % 2)
+ parameter = vcpu->arch.guest_gprs[r1];
+ else
+ parameter = vcpu->arch.guest_gprs[r1 + 1];
+
+ switch (order_code) {
+ case SIGP_SENSE:
+ vcpu->stat.instruction_sigp_sense++;
+ rc = __sigp_sense(vcpu, cpu_addr,
+ &vcpu->arch.guest_gprs[r1]);
+ break;
+ case SIGP_EMERGENCY:
+ vcpu->stat.instruction_sigp_emergency++;
+ rc = __sigp_emergency(vcpu, cpu_addr);
+ break;
+ case SIGP_STOP:
+ vcpu->stat.instruction_sigp_stop++;
+ rc = __sigp_stop(vcpu, cpu_addr, 0);
+ break;
+ case SIGP_STOP_STORE_STATUS:
+ vcpu->stat.instruction_sigp_stop++;
+ rc = __sigp_stop(vcpu, cpu_addr, 1);
+ break;
+ case SIGP_SET_ARCH:
+ vcpu->stat.instruction_sigp_arch++;
+ rc = __sigp_set_arch(vcpu, parameter);
+ break;
+ case SIGP_SET_PREFIX:
+ vcpu->stat.instruction_sigp_prefix++;
+ rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
+ &vcpu->arch.guest_gprs[r1]);
+ break;
+ case SIGP_RESTART:
+ vcpu->stat.instruction_sigp_restart++;
+ /* user space must know about restart */
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (rc < 0)
+ return rc;
+
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
+ return 0;
+}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index fd072013f88..5c1aea97cd1 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -30,11 +30,27 @@
#define TABLES_PER_PAGE 4
#define FRAG_MASK 15UL
#define SECOND_HALVES 10UL
+
+void clear_table_pgstes(unsigned long *table)
+{
+ clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
+ memset(table + 256, 0, PAGE_SIZE/4);
+ clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
+ memset(table + 768, 0, PAGE_SIZE/4);
+}
+
#else
#define ALLOC_ORDER 2
#define TABLES_PER_PAGE 2
#define FRAG_MASK 3UL
#define SECOND_HALVES 2UL
+
+void clear_table_pgstes(unsigned long *table)
+{
+ clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
+ memset(table + 256, 0, PAGE_SIZE/2);
+}
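+
+/*
+ * With pgstes each page table is followed by an equally sized block of
+ * page status table entries used by SIE for guest page state, so only
+ * half the usual number of tables fits in a page; clear_table()
+ * initializes the pte half while memset() zeroes the pgste half.
+ */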
+
#endif
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
@@ -153,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
unsigned long *table;
unsigned long bits;
- bits = mm->context.noexec ? 3UL : 1UL;
+ bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
spin_lock(&mm->page_table_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
@@ -170,7 +186,10 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
pgtable_page_ctor(page);
page->flags &= ~FRAG_MASK;
table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+ if (mm->context.pgstes)
+ clear_table_pgstes(table);
+ else
+ clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
spin_lock(&mm->page_table_lock);
list_add(&page->lru, &mm->context.pgtable_list);
}
@@ -191,7 +210,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
struct page *page;
unsigned long bits;
- bits = mm->context.noexec ? 3UL : 1UL;
+ bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
@@ -228,3 +247,43 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
mm->context.noexec = 0;
update_mm(mm, tsk);
}
+
+/*
+ * switch on pgstes for the current userspace process (for kvm)
+ */
+int s390_enable_sie(void)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm;
+ int rc;
+
+ task_lock(tsk);
+
+ rc = 0;
+ if (tsk->mm && tsk->mm->context.pgstes)
+ goto unlock;
+
+ rc = -EINVAL;
+ if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+ tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
+ goto unlock;
+
+ tsk->mm->context.pgstes = 1; /* dirty little tricks .. */
+ mm = dup_mm(tsk);
+ tsk->mm->context.pgstes = 0;
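+ /*
+ * page tables only receive pgste space when context.pgstes is set
+ * at allocation time, so the existing mm cannot be converted in
+ * place; it is duplicated while the flag is briefly set, and the
+ * task is switched over to the copy below
+ */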
+
+ rc = -ENOMEM;
+ if (!mm)
+ goto unlock;
+ mmput(tsk->mm);
+ tsk->mm = tsk->active_mm = mm;
+ preempt_disable();
+ update_mm(mm, tsk);
+ cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+ preempt_enable();
+ rc = 0;
+unlock:
+ task_unlock(tsk);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(s390_enable_sie);
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
index 3fbe69e359e..5696e7b374b 100644
--- a/arch/um/Kconfig.x86_64
+++ b/arch/um/Kconfig.x86_64
@@ -1,3 +1,10 @@
+
+menu "Host processor type and features"
+
+source "arch/x86/Kconfig.cpu"
+
+endmenu
+
config UML_X86
bool
default y
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index f4bd349d441..f25c29a12d0 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -14,6 +14,7 @@
#include "os.h"
#include "um_malloc.h"
#include "user.h"
+#include <linux/limits.h>
struct helper_data {
void (*pre_exec)(void*);
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index 964dc1a04c3..598b5c1903a 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -6,7 +6,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
sys_call_table.o tls.o
-subarch-obj-y = lib/bitops_32.o lib/semaphore_32.o lib/string_32.o
+subarch-obj-y = lib/semaphore_32.o lib/string_32.o
subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
subarch-obj-$(CONFIG_MODULES) += kernel/module_32.o
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index 3c22de53208..c8b4cce9cfe 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -10,7 +10,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
obj-$(CONFIG_MODULES) += um_module.o
-subarch-obj-y = lib/bitops_64.o lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
+subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
subarch-obj-$(CONFIG_MODULES) += kernel/module_64.o
ldt-y = ../sys-i386/ldt.o
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4d350b5cbc7..e5790fe9e33 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -142,6 +142,9 @@ config AUDIT_ARCH
config ARCH_SUPPORTS_AOUT
def_bool y
+config ARCH_SUPPORTS_OPTIMIZED_INLINING
+ def_bool y
+
# Use the generic interrupt handling code in kernel/irq/:
config GENERIC_HARDIRQS
bool
@@ -370,6 +373,25 @@ config VMI
at the moment), by linking the kernel to a GPL-ed ROM module
provided by the hypervisor.
+config KVM_CLOCK
+ bool "KVM paravirtualized clock"
+ select PARAVIRT
+ depends on !(X86_VISWS || X86_VOYAGER)
+ help
+ Turning on this option will allow you to run a paravirtualized
+ clock when running over the KVM hypervisor. Instead of relying on
+ a PIT (or possibly other) emulation by the underlying device
+ model, the host provides the guest with timing infrastructure such
+ as time of day and system time.
+
+config KVM_GUEST
+ bool "KVM Guest support"
+ select PARAVIRT
+ depends on !(X86_VISWS || X86_VOYAGER)
+ help
+ This option enables various optimizations for running under the KVM
+ hypervisor.
+
source "arch/x86/lguest/Kconfig"
config PARAVIRT
@@ -1049,9 +1071,9 @@ config MTRR
See <file:Documentation/mtrr.txt> for more information.
config X86_PAT
- def_bool y
+ bool
prompt "x86 PAT support"
- depends on MTRR && NONPROMISC_DEVMEM
+ depends on MTRR
help
Use PAT attributes to setup page level cache control.
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 57072f2716f..7ef18b01f0b 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -21,8 +21,8 @@ config M386
Here are the settings recommended for greatest speed:
- "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
- 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels
- will run on a 386 class machine.
+ 486DLC/DLC2, and UMC 486SX-S. Only "386" kernels will run on a 386
+ class machine.
- "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
- "586" for generic Pentium CPUs lacking the TSC
@@ -278,6 +278,11 @@ config GENERIC_CPU
endchoice
+config X86_CPU
+ def_bool y
+ select GENERIC_FIND_FIRST_BIT
+ select GENERIC_FIND_NEXT_BIT
+
config X86_GENERIC
bool "Generic x86 support"
depends on X86_32
@@ -398,7 +403,7 @@ config X86_TSC
# generates cmov.
config X86_CMOV
def_bool y
- depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7)
+ depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || X86_64)
config X86_MINIMUM_CPU_FAMILY
int
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 239fd9fba0a..5b1979a45a1 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -257,3 +257,16 @@ config CPA_DEBUG
Do change_page_attr() self-tests every 30 seconds.
endmenu
+
+config OPTIMIZE_INLINING
+ bool "Allow gcc to uninline functions marked 'inline'"
+ default y
+ help
+ This option determines if the kernel forces gcc to inline the
+ functions developers have marked 'inline'. Doing so takes away
+ freedom from gcc to do what it thinks is best, which is desirable
+ for the gcc 3.x series of compilers. The gcc 4.x series has a
+ rewritten inlining algorithm, and disabling this option will
+ generate a smaller kernel there. Hopefully this algorithm is good
+ enough that allowing gcc 4 to make the decision can become the
+ default in the future; until then, this option is here to test
+ gcc for this.
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 6d2df8d61c5..af86e431acf 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -120,7 +120,7 @@ _start:
# Part 2 of the header, from the old setup.S
.ascii "HdrS" # header signature
- .word 0x0208 # header version number (>= 0x0105)
+ .word 0x0209 # header version number (>= 0x0105)
# or else old loadlin-1.5 will fail)
.globl realmode_swtch
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -227,6 +227,10 @@ hardware_subarch_data: .quad 0
payload_offset: .long input_data
payload_length: .long input_data_end-input_data
+setup_data: .quad 0 # 64-bit physical pointer to
+ # single linked list of
+ # struct setup_data
+
# End of setup header #####################################################
.section ".inittext", "ax"
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 3df340b54e5..ad7ddaaff58 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1421,6 +1421,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_FRAME_POINTER is not set
+CONFIG_OPTIMIZE_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index eef98cb00c6..2d6f5b2809d 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1346,6 +1346,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_FRAME_POINTER is not set
+CONFIG_OPTIMIZE_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 05e155d3fb6..bbed3a26ce5 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -499,11 +499,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
regs->cs = __USER32_CS;
regs->ss = __USER32_DS;
- set_fs(USER_DS);
- regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
- if (test_thread_flag(TIF_SINGLESTEP))
- ptrace_notify(SIGTRAP);
-
#if DEBUG_SIG
printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
current->comm, current->pid, frame, regs->ip, frame->pretcode);
@@ -599,11 +594,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->cs = __USER32_CS;
regs->ss = __USER32_DS;
- set_fs(USER_DS);
- regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
- if (test_thread_flag(TIF_SINGLESTEP))
- ptrace_notify(SIGTRAP);
-
#if DEBUG_SIG
printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
current->comm, current->pid, frame, regs->ip, frame->pretcode);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index ae7158bce4d..b5e329da166 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -430,7 +430,7 @@ ia32_sys_call_table:
.quad sys_setuid16
.quad sys_getuid16
.quad compat_sys_stime /* stime */ /* 25 */
- .quad sys32_ptrace /* ptrace */
+ .quad compat_sys_ptrace /* ptrace */
.quad sys_alarm
.quad sys_fstat /* (old)fstat */
.quad sys_pause
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 90e092d0af0..fa19c381954 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -80,6 +80,8 @@ obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
+obj-$(CONFIG_KVM_GUEST) += kvm.o
+obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
ifdef CONFIG_INPUT_PCSPKR
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 057ccf1d5ad..977ed5cdeaa 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -697,10 +697,6 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
#define HPET_RESOURCE_NAME_SIZE 9
hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
- if (!hpet_res)
- return 0;
-
- memset(hpet_res, 0, sizeof(*hpet_res));
hpet_res->name = (void *)&hpet_res[1];
hpet_res->flags = IORESOURCE_MEM;
snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 8317401170b..4b99b1bdeb6 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -451,7 +451,8 @@ void __init setup_boot_APIC_clock(void)
}
/* Calculate the scaled math multiplication factor */
- lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS, 32);
+ lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
+ lapic_clockevent.shift);
lapic_clockevent.max_delta_ns =
clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
lapic_clockevent.min_delta_ns =
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index bf83157337e..5910020c3f2 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -360,7 +360,8 @@ static void __init calibrate_APIC_clock(void)
result / 1000 / 1000, result / 1000 % 1000);
/* Calculate the scaled math multiplication factor */
- lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
+ lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
+ lapic_clockevent.shift);
lapic_clockevent.max_delta_ns =
clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
lapic_clockevent.min_delta_ns =
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f0030a0999c..e4ea362e848 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -904,6 +904,7 @@ recalc:
original_pm_idle();
else
default_idle();
+ local_irq_disable();
jiffies_since_last_check = jiffies - last_jiffies;
if (jiffies_since_last_check > idle_period)
goto recalc;
@@ -911,6 +912,8 @@ recalc:
if (apm_idle_done)
apm_do_busy();
+
+ local_irq_enable();
}
/**
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ee7c45235e5..a0c6f819088 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_X86_32) += cyrix.o
obj-$(CONFIG_X86_32) += centaur.o
obj-$(CONFIG_X86_32) += transmeta.o
obj-$(CONFIG_X86_32) += intel.o
-obj-$(CONFIG_X86_32) += nexgen.o
obj-$(CONFIG_X86_32) += umc.o
obj-$(CONFIG_X86_MCE) += mcheck/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 0173065dc3b..24586682829 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -343,10 +343,4 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
.c_size_cache = amd_size_cache,
};
-int __init amd_init_cpu(void)
-{
- cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
- return 0;
-}
-
cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 9a699ed0359..e07e8c068ae 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -49,7 +49,7 @@ static int banks;
static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
static unsigned long notify_user;
static int rip_msr;
-static int mce_bootlog = 1;
+static int mce_bootlog = -1;
static atomic_t mce_events;
static char trigger[128];
@@ -471,13 +471,15 @@ static void mce_init(void *dummy)
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
/* This should be disabled by the BIOS, but isn't always */
- if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
- /* disable GART TBL walk error reporting, which trips off
- incorrectly with the IOMMU & 3ware & Cerberus. */
- clear_bit(10, &bank[4]);
- /* Lots of broken BIOS around that don't clear them
- by default and leave crap in there. Don't log. */
- mce_bootlog = 0;
+ if (c->x86_vendor == X86_VENDOR_AMD) {
+ if (c->x86 == 15)
+ /* disable GART TBL walk error reporting, which trips off
+ incorrectly with the IOMMU & 3ware & Cerberus. */
+ clear_bit(10, &bank[4]);
+ if (c->x86 <= 17 && mce_bootlog < 0)
+ /* Lots of broken BIOS around that don't clear them
+ by default and leave crap in there. Don't log. */
+ mce_bootlog = 0;
}
}
diff --git a/arch/x86/kernel/cpu/nexgen.c b/arch/x86/kernel/cpu/nexgen.c
deleted file mode 100644
index 5d5e1c13412..00000000000
--- a/arch/x86/kernel/cpu/nexgen.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <asm/processor.h>
-
-#include "cpu.h"
-
-/*
- * Detect a NexGen CPU running without BIOS hypercode new enough
- * to have CPUID. (Thanks to Herbert Oppmann)
- */
-
-static int __cpuinit deep_magic_nexgen_probe(void)
-{
- int ret;
-
- __asm__ __volatile__ (
- " movw $0x5555, %%ax\n"
- " xorw %%dx,%%dx\n"
- " movw $2, %%cx\n"
- " divw %%cx\n"
- " movl $0, %%eax\n"
- " jnz 1f\n"
- " movl $1, %%eax\n"
- "1:\n"
- : "=a" (ret) : : "cx", "dx");
- return ret;
-}
-
-static void __cpuinit init_nexgen(struct cpuinfo_x86 *c)
-{
- c->x86_cache_size = 256; /* A few had 1 MB... */
-}
-
-static void __cpuinit nexgen_identify(struct cpuinfo_x86 *c)
-{
- /* Detect NexGen with old hypercode */
- if (deep_magic_nexgen_probe())
- strcpy(c->x86_vendor_id, "NexGenDriven");
-}
-
-static struct cpu_dev nexgen_cpu_dev __cpuinitdata = {
- .c_vendor = "Nexgen",
- .c_ident = { "NexGenDriven" },
- .c_models = {
- { .vendor = X86_VENDOR_NEXGEN,
- .family = 5,
- .model_names = { [1] = "Nx586" }
- },
- },
- .c_init = init_nexgen,
- .c_identify = nexgen_identify,
-};
-
-int __init nexgen_init_cpu(void)
-{
- cpu_devs[X86_VENDOR_NEXGEN] = &nexgen_cpu_dev;
- return 0;
-}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index b943e10ad81..f9ae93adffe 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -614,16 +614,6 @@ static struct wd_ops intel_arch_wd_ops __read_mostly = {
.evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
};
-static struct wd_ops coreduo_wd_ops = {
- .reserve = single_msr_reserve,
- .unreserve = single_msr_unreserve,
- .setup = setup_intel_arch_watchdog,
- .rearm = p6_rearm,
- .stop = single_msr_stop_watchdog,
- .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
- .evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
-};
-
static void probe_nmi_watchdog(void)
{
switch (boot_cpu_data.x86_vendor) {
@@ -637,8 +627,8 @@ static void probe_nmi_watchdog(void)
/* Work around Core Duo (Yonah) errata AE49 where perfctr1
doesn't have a working enable bit. */
if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
- wd_ops = &coreduo_wd_ops;
- break;
+ intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
+ intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
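+ /* no break: fall through so the arch-perfmon path below
+ * picks up the adjusted counter and event select registers */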
}
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
wd_ops = &intel_arch_wd_ops;
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 2251d0ae957..26855381790 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -25,6 +25,7 @@
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/smp.h>
+#include <asm/reboot.h>
#include <mach_ipi.h>
@@ -117,7 +118,7 @@ static void nmi_shootdown_cpus(void)
}
#endif
-void machine_crash_shutdown(struct pt_regs *regs)
+void native_machine_crash_shutdown(struct pt_regs *regs)
{
/* This function is only called after the system
* has panicked or is otherwise in a critical state.
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index cbd42e51cb0..645ee5e32a2 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -84,14 +84,41 @@ void __init reserve_early(unsigned long start, unsigned long end, char *name)
strncpy(r->name, name, sizeof(r->name) - 1);
}
-void __init early_res_to_bootmem(void)
+void __init free_early(unsigned long start, unsigned long end)
+{
+ struct early_res *r;
+ int i, j;
+
+ for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
+ r = &early_res[i];
+ if (start == r->start && end == r->end)
+ break;
+ }
+ if (i >= MAX_EARLY_RES || !early_res[i].end)
+ panic("free_early on not reserved area: %lx-%lx!", start, end);
+
+ for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
+ ;
+
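+ /* compact the array: slide entries i+1..j-1 down one slot and
+ * terminate the vacated tail entry below */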
+ memcpy(&early_res[i], &early_res[i + 1],
+ (j - 1 - i) * sizeof(struct early_res));
+
+ early_res[j - 1].end = 0;
+}
+
+void __init early_res_to_bootmem(unsigned long start, unsigned long end)
{
int i;
+ unsigned long final_start, final_end;
for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
struct early_res *r = &early_res[i];
- printk(KERN_INFO "early res: %d [%lx-%lx] %s\n", i,
- r->start, r->end - 1, r->name);
- reserve_bootmem_generic(r->start, r->end - r->start);
+ final_start = max(start, r->start);
+ final_end = min(end, r->end);
+ if (final_start >= final_end)
+ continue;
+ printk(KERN_INFO " early res: %d [%lx-%lx] %s\n", i,
+ final_start, final_end - 1, r->name);
+ reserve_bootmem_generic(final_start, final_end - final_start);
}
}
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 9546ef408b9..021624c8358 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -51,7 +51,7 @@ void __init setup_apic_routing(void)
else
#endif
- if (cpus_weight(cpu_possible_map) <= 8)
+ if (num_possible_cpus() <= 8)
genapic = &apic_flat;
else
genapic = &apic_physflat;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 993c7677325..e25c57b8aa8 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -11,6 +11,7 @@
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
+#include <linux/io.h>
#include <asm/processor.h>
#include <asm/proto.h>
@@ -22,6 +23,7 @@
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820.h>
+#include <asm/bios_ebda.h>
static void __init zap_identity_mappings(void)
{
@@ -49,7 +51,6 @@ static void __init copy_bootdata(char *real_mode_data)
}
}
-#define BIOS_EBDA_SEGMENT 0x40E
#define BIOS_LOWMEM_KILOBYTES 0x413
/*
@@ -80,8 +81,7 @@ static void __init reserve_ebda_region(void)
lowmem <<= 10;
/* start of EBDA area */
- ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT);
- ebda_addr <<= 4;
+ ebda_addr = get_bios_ebda();
/* Fixup: bios puts an EBDA in the top 64K segment */
/* of conventional memory, but does not adjust lowmem. */
@@ -101,6 +101,24 @@ static void __init reserve_ebda_region(void)
reserve_early(lowmem, 0x100000, "BIOS reserved");
}
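+
+/*
+ * Boot loaders chain struct setup_data blobs as a singly linked list
+ * in physical memory: hdr.setup_data holds the head and each node's
+ * next field the physical address of its successor, so the walk below
+ * maps one node at a time just long enough to read its header.
+ */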
+static void __init reserve_setup_data(void)
+{
+ struct setup_data *data;
+ unsigned long pa_data;
+ char buf[32];
+
+ if (boot_params.hdr.version < 0x0209)
+ return;
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = early_ioremap(pa_data, sizeof(*data));
+ sprintf(buf, "setup data %x", data->type);
+ reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
+ pa_data = data->next;
+ early_iounmap(data, sizeof(*data));
+ }
+}
+
void __init x86_64_start_kernel(char * real_mode_data)
{
int i;
@@ -157,6 +175,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
#endif
reserve_ebda_region();
+ reserve_setup_data();
/*
* At this point everything still needed from the boot loader
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 36652ea1a26..9007f9ea64e 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -218,7 +218,7 @@ static void hpet_legacy_clockevent_register(void)
hpet_freq = 1000000000000000ULL;
do_div(hpet_freq, hpet_period);
hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
- NSEC_PER_SEC, 32);
+ NSEC_PER_SEC, hpet_clockevent.shift);
/* Calculate the min / max delta */
hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
&hpet_clockevent);
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 8540abe86ad..c1b5e3ece1f 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -115,7 +115,8 @@ void __init setup_pit_timer(void)
* IO_APIC has been initialized.
*/
pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
- pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32);
+ pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
+ pit_clockevent.shift);
pit_clockevent.max_delta_ns =
clockevent_delta2ns(0x7FFF, &pit_clockevent);
pit_clockevent.min_delta_ns =
@@ -224,7 +225,8 @@ static int __init init_pit_clocksource(void)
pit_clockevent.mode != CLOCK_EVT_MODE_PERIODIC)
return 0;
- clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
+ clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE,
+ clocksource_pit.shift);
return clocksource_register(&clocksource_pit);
}
arch_initcall(init_pit_clocksource);
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 2e2f42074e1..696b8e4e66b 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -2068,7 +2068,7 @@ static void __init setup_nmi(void)
* cycles as some i82489DX-based boards have glue logic that keeps the
* 8259A interrupt line asserted until INTA. --macro
*/
-static inline void unlock_ExtINT_logic(void)
+static inline void __init unlock_ExtINT_logic(void)
{
int apic, pin, i;
struct IO_APIC_route_entry entry0, entry1;
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 9ba11d07920..ef1a8dfcc52 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1599,7 +1599,7 @@ static void __init setup_nmi(void)
* cycles as some i82489DX-based boards have glue logic that keeps the
* 8259A interrupt line asserted until INTA. --macro
*/
-static inline void unlock_ExtINT_logic(void)
+static inline void __init unlock_ExtINT_logic(void)
{
int apic, pin, i;
struct IO_APIC_route_entry entry0, entry1;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 6ea67b76a21..00bda7bcda6 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -134,7 +134,7 @@ unsigned int do_IRQ(struct pt_regs *regs)
: "=a" (arg1), "=d" (arg2), "=b" (bx)
: "0" (irq), "1" (desc), "2" (isp),
"D" (desc->handle_irq)
- : "memory", "cc"
+ : "memory", "cc", "ecx"
);
} else
#endif
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 73354302fda..c0320599171 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -6,23 +6,171 @@
*
* This file is released under the GPLv2.
*/
-
#include <linux/debugfs.h>
+#include <linux/uaccess.h>
#include <linux/stat.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mm.h>
#include <asm/setup.h>
#ifdef CONFIG_DEBUG_BOOT_PARAMS
+struct setup_data_node {
+ u64 paddr;
+ u32 type;
+ u32 len;
+};
+
+static ssize_t
+setup_data_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct setup_data_node *node = file->private_data;
+ unsigned long remain;
+ loff_t pos = *ppos;
+ struct page *pg;
+ void *p;
+ u64 pa;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= node->len)
+ return 0;
+
+ if (count > node->len - pos)
+ count = node->len - pos;
+ pa = node->paddr + sizeof(struct setup_data) + pos;
+ pg = pfn_to_page((pa + count - 1) >> PAGE_SHIFT);
+ if (PageHighMem(pg)) {
+ p = ioremap_cache(pa, count);
+ if (!p)
+ return -ENXIO;
+ } else {
+ p = __va(pa);
+ }
+
+ remain = copy_to_user(user_buf, p, count);
+
+ if (PageHighMem(pg))
+ iounmap(p);
+
+ if (remain)
+ return -EFAULT;
+
+ *ppos = pos + count;
+
+ return count;
+}
+
+static int setup_data_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations fops_setup_data = {
+ .read = setup_data_read,
+ .open = setup_data_open,
+};
+
+static int __init
+create_setup_data_node(struct dentry *parent, int no,
+ struct setup_data_node *node)
+{
+ struct dentry *d, *type, *data;
+ char buf[16];
+ int error;
+
+ sprintf(buf, "%d", no);
+ d = debugfs_create_dir(buf, parent);
+ if (!d) {
+ error = -ENOMEM;
+ goto err_return;
+ }
+ type = debugfs_create_x32("type", S_IRUGO, d, &node->type);
+ if (!type) {
+ error = -ENOMEM;
+ goto err_dir;
+ }
+ data = debugfs_create_file("data", S_IRUGO, d, node, &fops_setup_data);
+ if (!data) {
+ error = -ENOMEM;
+ goto err_type;
+ }
+ return 0;
+
+err_type:
+ debugfs_remove(type);
+err_dir:
+ debugfs_remove(d);
+err_return:
+ return error;
+}
+
+static int __init create_setup_data_nodes(struct dentry *parent)
+{
+ struct setup_data_node *node;
+ struct setup_data *data;
+ int error, no = 0;
+ struct dentry *d;
+ struct page *pg;
+ u64 pa_data;
+
+ d = debugfs_create_dir("setup_data", parent);
+ if (!d) {
+ error = -ENOMEM;
+ goto err_return;
+ }
+
+ pa_data = boot_params.hdr.setup_data;
+
+ while (pa_data) {
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ error = -ENOMEM;
+ goto err_dir;
+ }
+ pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
+ if (PageHighMem(pg)) {
+ data = ioremap_cache(pa_data, sizeof(*data));
+ if (!data) {
+ error = -ENXIO;
+ goto err_dir;
+ }
+ } else {
+ data = __va(pa_data);
+ }
+
+ node->paddr = pa_data;
+ node->type = data->type;
+ node->len = data->len;
+ error = create_setup_data_node(d, no, node);
+ pa_data = data->next;
+
+ if (PageHighMem(pg))
+ iounmap(data);
+ if (error)
+ goto err_dir;
+ no++;
+ }
+ return 0;
+
+err_dir:
+ debugfs_remove(d);
+err_return:
+ return error;
+}
+
static struct debugfs_blob_wrapper boot_params_blob = {
- .data = &boot_params,
- .size = sizeof(boot_params),
+ .data = &boot_params,
+ .size = sizeof(boot_params),
};
static int __init boot_params_kdebugfs_init(void)
{
- int error;
struct dentry *dbp, *version, *data;
+ int error;
dbp = debugfs_create_dir("boot_params", NULL);
if (!dbp) {
@@ -41,7 +189,13 @@ static int __init boot_params_kdebugfs_init(void)
error = -ENOMEM;
goto err_version;
}
+ error = create_setup_data_nodes(dbp);
+ if (error)
+ goto err_data;
return 0;
+
+err_data:
+ debugfs_remove(data);
err_version:
debugfs_remove(version);
err_dir:
@@ -61,5 +215,4 @@ static int __init arch_kdebugfs_init(void)
return error;
}
-
arch_initcall(arch_kdebugfs_init);
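
Editor's note: with CONFIG_DEBUG_BOOT_PARAMS enabled, the nodes created above show up under debugfs as numbered directories each holding a "type" and a "data" file. A small userspace reader (a sketch, assuming debugfs is mounted at /sys/kernel/debug and entry 0 exists):

#include <stdio.h>

int main(void)
{
	/* Paths follow the layout created by create_setup_data_nodes(). */
	const char *type_path = "/sys/kernel/debug/boot_params/setup_data/0/type";
	const char *data_path = "/sys/kernel/debug/boot_params/setup_data/0/data";
	char buf[64];
	FILE *f = fopen(type_path, "r");

	if (!f) {
		perror("open type (entry 0 may not exist)");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("setup_data[0].type = %s", buf);
	fclose(f);

	f = fopen(data_path, "rb");
	if (f) {
		size_t n = fread(buf, 1, sizeof(buf), f);
		printf("read %zu byte(s) of payload\n", n);
		fclose(f);
	}
	return 0;
}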
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
new file mode 100644
index 00000000000..8b7a3cf37d2
--- /dev/null
+++ b/arch/x86/kernel/kvm.c
@@ -0,0 +1,248 @@
+/*
+ * KVM paravirt_ops implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright IBM Corporation, 2007
+ * Authors: Anthony Liguori <aliguori@us.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kvm_para.h>
+#include <linux/cpu.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/hardirq.h>
+
+#define MMU_QUEUE_SIZE 1024
+
+struct kvm_para_state {
+ u8 mmu_queue[MMU_QUEUE_SIZE];
+ int mmu_queue_len;
+ enum paravirt_lazy_mode mode;
+};
+
+static DEFINE_PER_CPU(struct kvm_para_state, para_state);
+
+static struct kvm_para_state *kvm_para_state(void)
+{
+ return &per_cpu(para_state, raw_smp_processor_id());
+}
+
+/*
+ * No need for any "IO delay" on KVM
+ */
+static void kvm_io_delay(void)
+{
+}
+
+static void kvm_mmu_op(void *buffer, unsigned len)
+{
+ int r;
+ unsigned long a1, a2;
+
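+	/*
+	 * The hypercall may make partial progress; it returns the number
+	 * of bytes it consumed, so resubmit the remainder until done.
+	 */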
+ do {
+ a1 = __pa(buffer);
+ a2 = 0; /* on i386 __pa() always returns <4G */
+ r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
+ buffer += r;
+ len -= r;
+ } while (len);
+}
+
+static void mmu_queue_flush(struct kvm_para_state *state)
+{
+ if (state->mmu_queue_len) {
+ kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
+ state->mmu_queue_len = 0;
+ }
+}
+
+static void kvm_deferred_mmu_op(void *buffer, int len)
+{
+ struct kvm_para_state *state = kvm_para_state();
+
+ if (state->mode != PARAVIRT_LAZY_MMU) {
+ kvm_mmu_op(buffer, len);
+ return;
+ }
+ if (state->mmu_queue_len + len > sizeof state->mmu_queue)
+ mmu_queue_flush(state);
+ memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
+ state->mmu_queue_len += len;
+}
+
+static void kvm_mmu_write(void *dest, u64 val)
+{
+ __u64 pte_phys;
+ struct kvm_mmu_op_write_pte wpte;
+
+#ifdef CONFIG_HIGHPTE
+ struct page *page;
+ unsigned long dst = (unsigned long) dest;
+
+ page = kmap_atomic_to_page(dest);
+ pte_phys = page_to_pfn(page);
+ pte_phys <<= PAGE_SHIFT;
+ pte_phys += (dst & ~(PAGE_MASK));
+#else
+ pte_phys = (unsigned long)__pa(dest);
+#endif
+ wpte.header.op = KVM_MMU_OP_WRITE_PTE;
+ wpte.pte_val = val;
+ wpte.pte_phys = pte_phys;
+
+ kvm_deferred_mmu_op(&wpte, sizeof wpte);
+}
+
+/*
+ * We only need to hook operations that are MMU writes. We hook these so that
+ * we can use lazy MMU mode to batch these operations. We could probably
+ * improve the performance of the host code if we used some of the information
+ * here to simplify processing of batched writes.
+ */
+static void kvm_set_pte(pte_t *ptep, pte_t pte)
+{
+ kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ kvm_mmu_write(pmdp, pmd_val(pmd));
+}
+
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+ kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pte_present(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ kvm_mmu_write(ptep, 0);
+}
+
+static void kvm_pmd_clear(pmd_t *pmdp)
+{
+ kvm_mmu_write(pmdp, 0);
+}
+#endif
+
+static void kvm_set_pud(pud_t *pudp, pud_t pud)
+{
+ kvm_mmu_write(pudp, pud_val(pud));
+}
+
+#if PAGETABLE_LEVELS == 4
+static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+ kvm_mmu_write(pgdp, pgd_val(pgd));
+}
+#endif
+#endif /* PAGETABLE_LEVELS >= 3 */
+
+static void kvm_flush_tlb(void)
+{
+ struct kvm_mmu_op_flush_tlb ftlb = {
+ .header.op = KVM_MMU_OP_FLUSH_TLB,
+ };
+
+ kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
+}
+
+static void kvm_release_pt(u32 pfn)
+{
+ struct kvm_mmu_op_release_pt rpt = {
+ .header.op = KVM_MMU_OP_RELEASE_PT,
+ .pt_phys = (u64)pfn << PAGE_SHIFT,
+ };
+
+ kvm_mmu_op(&rpt, sizeof rpt);
+}
+
+static void kvm_enter_lazy_mmu(void)
+{
+ struct kvm_para_state *state = kvm_para_state();
+
+ paravirt_enter_lazy_mmu();
+ state->mode = paravirt_get_lazy_mode();
+}
+
+static void kvm_leave_lazy_mmu(void)
+{
+ struct kvm_para_state *state = kvm_para_state();
+
+ mmu_queue_flush(state);
+ paravirt_leave_lazy(paravirt_get_lazy_mode());
+ state->mode = paravirt_get_lazy_mode();
+}
+
+static void paravirt_ops_setup(void)
+{
+ pv_info.name = "KVM";
+ pv_info.paravirt_enabled = 1;
+
+ if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
+ pv_cpu_ops.io_delay = kvm_io_delay;
+
+ if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
+ pv_mmu_ops.set_pte = kvm_set_pte;
+ pv_mmu_ops.set_pte_at = kvm_set_pte_at;
+ pv_mmu_ops.set_pmd = kvm_set_pmd;
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+ pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
+ pv_mmu_ops.set_pte_present = kvm_set_pte_present;
+ pv_mmu_ops.pte_clear = kvm_pte_clear;
+ pv_mmu_ops.pmd_clear = kvm_pmd_clear;
+#endif
+ pv_mmu_ops.set_pud = kvm_set_pud;
+#if PAGETABLE_LEVELS == 4
+ pv_mmu_ops.set_pgd = kvm_set_pgd;
+#endif
+#endif
+ pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
+ pv_mmu_ops.release_pte = kvm_release_pt;
+ pv_mmu_ops.release_pmd = kvm_release_pt;
+ pv_mmu_ops.release_pud = kvm_release_pt;
+
+ pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
+ pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
+ }
+}
+
+void __init kvm_guest_init(void)
+{
+ if (!kvm_para_available())
+ return;
+
+ paravirt_ops_setup();
+}
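
Editor's note: kvm_deferred_mmu_op() above batches MMU writes while in lazy mode, flushing the queue whenever the next op would not fit. A minimal self-contained model of that buffering policy (a sketch with a plain byte queue; the hypercall is replaced by a stub):

#include <stdio.h>
#include <string.h>

#define QUEUE_SIZE 1024

static unsigned char queue[QUEUE_SIZE];
static int queue_len;

/* Stand-in for the batched hypercall submission. */
static void submit(const void *buf, int len)
{
	printf("submitting %d byte(s)\n", len);
}

static void flush(void)
{
	if (queue_len) {
		submit(queue, queue_len);
		queue_len = 0;
	}
}

/* Append an op; flush first if it would overflow the queue. */
static void defer(const void *op, int len)
{
	if (queue_len + len > QUEUE_SIZE)
		flush();
	memcpy(queue + queue_len, op, len);
	queue_len += len;
}

int main(void)
{
	char op[300] = {0};
	int i;

	for (i = 0; i < 5; i++)
		defer(op, sizeof(op));	/* the 4th append forces a flush */
	flush();
	return 0;
}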
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
new file mode 100644
index 00000000000..ddee04043ae
--- /dev/null
+++ b/arch/x86/kernel/kvmclock.c
@@ -0,0 +1,187 @@
+/* KVM paravirtual clock driver. A clocksource implementation
+ Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include <linux/clocksource.h>
+#include <linux/kvm_para.h>
+#include <asm/arch_hooks.h>
+#include <asm/msr.h>
+#include <asm/apic.h>
+#include <linux/percpu.h>
+#include <asm/reboot.h>
+
+#define KVM_SCALE 22
+
+static int kvmclock = 1;
+
+static int parse_no_kvmclock(char *arg)
+{
+ kvmclock = 0;
+ return 0;
+}
+early_param("no-kvmclock", parse_no_kvmclock);
+
+/* The hypervisor periodically writes time information here */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct kvm_vcpu_time_info, hv_clock);
+#define get_clock(cpu, field) per_cpu(hv_clock, cpu).field
+
+static inline u64 kvm_get_delta(u64 last_tsc)
+{
+ int cpu = smp_processor_id();
+ u64 delta = native_read_tsc() - last_tsc;
+ return (delta * get_clock(cpu, tsc_to_system_mul)) >> KVM_SCALE;
+}
+
+static struct kvm_wall_clock wall_clock;
+static cycle_t kvm_clock_read(void);
+/*
+ * The wallclock is the time of day when we booted. Some time may have
+ * elapsed since the hypervisor wrote that data, so we try to account for
+ * it using the system time.
+ */
+unsigned long kvm_get_wallclock(void)
+{
+ u32 wc_sec, wc_nsec;
+ u64 delta;
+ struct timespec ts;
+ int version, nsec;
+ int low, high;
+
+ low = (int)__pa(&wall_clock);
+ high = ((u64)__pa(&wall_clock) >> 32);
+
+ delta = kvm_clock_read();
+
+ native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
+ do {
+ version = wall_clock.wc_version;
+ rmb();
+ wc_sec = wall_clock.wc_sec;
+ wc_nsec = wall_clock.wc_nsec;
+ rmb();
+ } while ((wall_clock.wc_version != version) || (version & 1));
+
+ delta = kvm_clock_read() - delta;
+ delta += wc_nsec;
+ nsec = do_div(delta, NSEC_PER_SEC);
+ set_normalized_timespec(&ts, wc_sec + delta, nsec);
+ /*
+ * Of all mechanisms of time adjustment I've tested, this one
+ * was the champion!
+ */
+ return ts.tv_sec + 1;
+}
+
+int kvm_set_wallclock(unsigned long now)
+{
+ return 0;
+}
+
+/*
+ * This is our read_clock function. The host stores a TSC timestamp each
+ * time it updates the time. Without the TSC adjustment, we could see a
+ * situation in which a vcpu starts to run earlier (smaller system_time)
+ * but probes the time later than another vcpu, making time appear to go
+ * backwards.
+ */
+static cycle_t kvm_clock_read(void)
+{
+ u64 last_tsc, now;
+ int cpu;
+
+ preempt_disable();
+ cpu = smp_processor_id();
+
+ last_tsc = get_clock(cpu, tsc_timestamp);
+ now = get_clock(cpu, system_time);
+
+ now += kvm_get_delta(last_tsc);
+ preempt_enable();
+
+ return now;
+}
+static struct clocksource kvm_clock = {
+ .name = "kvm-clock",
+ .read = kvm_clock_read,
+ .rating = 400,
+ .mask = CLOCKSOURCE_MASK(64),
+ .mult = 1 << KVM_SCALE,
+ .shift = KVM_SCALE,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int kvm_register_clock(void)
+{
+ int cpu = smp_processor_id();
+ int low, high;
+ low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
+ high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
+
+ return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
+}
+
+static void kvm_setup_secondary_clock(void)
+{
+ /*
+ * The first CPU already initialized this clocksource, so registering
+ * it for a secondary CPU shouldn't fail.
+ */
+ WARN_ON(kvm_register_clock());
+ /* ok, done with our trickery, call native */
+ setup_secondary_APIC_clock();
+}
+
+/*
+ * After the clock is registered, the host will keep writing to the
+ * registered memory location. If the guest happens to shut down, this memory
+ * won't be valid. In cases like kexec, where a new kernel is installed, this
+ * means a random memory location would keep being written. So before any
+ * kind of shutdown from our side, we unregister the clock by writing anything
+ * that does not have the 'enable' bit set in the MSR.
+ */
+#ifdef CONFIG_KEXEC
+static void kvm_crash_shutdown(struct pt_regs *regs)
+{
+ native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
+ native_machine_crash_shutdown(regs);
+}
+#endif
+
+static void kvm_shutdown(void)
+{
+ native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
+ native_machine_shutdown();
+}
+
+void __init kvmclock_init(void)
+{
+ if (!kvm_para_available())
+ return;
+
+ if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+ if (kvm_register_clock())
+ return;
+ pv_time_ops.get_wallclock = kvm_get_wallclock;
+ pv_time_ops.set_wallclock = kvm_set_wallclock;
+ pv_time_ops.sched_clock = kvm_clock_read;
+ pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
+ machine_ops.shutdown = kvm_shutdown;
+#ifdef CONFIG_KEXEC
+ machine_ops.crash_shutdown = kvm_crash_shutdown;
+#endif
+ clocksource_register(&kvm_clock);
+ }
+}
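
Editor's note: kvm_get_wallclock() above uses a version-counter retry loop. The hypervisor bumps wc_version to an odd value while updating and back to even when done, so the reader retries until it observes the same even version before and after the reads. A self-contained model of the reader side (a sketch; single-threaded demo, whereas in the real protocol the writer is the host and real barriers are required):

#include <stdint.h>
#include <stdio.h>

struct wall_clock {
	uint32_t version;	/* odd while the writer is mid-update */
	uint32_t sec;
	uint32_t nsec;
};

/* Retry until a stable, even version brackets the data reads. */
static void read_stable(volatile struct wall_clock *wc,
			uint32_t *sec, uint32_t *nsec)
{
	uint32_t v;

	do {
		v = wc->version;
		/* a real implementation needs a read barrier (rmb()) here */
		*sec  = wc->sec;
		*nsec = wc->nsec;
		/* ... and another rmb() here */
	} while (wc->version != v || (v & 1));
}

int main(void)
{
	struct wall_clock wc = { .version = 2, .sec = 1209340800, .nsec = 0 };
	uint32_t s, ns;

	read_stable(&wc, &s, &ns);
	printf("wall clock: %u.%09u\n", s, ns);
	return 0;
}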
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index b402c0f3f19..cfc2648d25f 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -364,7 +364,8 @@ int __init mfgpt_timer_setup(void)
geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
/* Set up the clock event */
- mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, 32);
+ mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
+ mfgpt_clockevent.shift);
mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
&mfgpt_clockevent);
mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 70744e344fa..3e2c54dc8b2 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -686,13 +686,11 @@ void __init get_smp_config(void)
static int __init smp_scan_config(unsigned long base, unsigned long length,
unsigned reserve)
{
- extern void __bad_mpf_size(void);
unsigned int *bp = phys_to_virt(base);
struct intel_mp_floating *mpf;
Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length);
- if (sizeof(*mpf) != 16)
- __bad_mpf_size();
+ BUILD_BUG_ON(sizeof(*mpf) != 16);
while (length > 0) {
mpf = (struct intel_mp_floating *)bp;
@@ -801,7 +799,6 @@ void __init find_smp_config(void)
#ifdef CONFIG_X86_IO_APIC
#define MP_ISA_BUS 0
-#define MP_MAX_IOAPIC_PIN 127
extern struct mp_ioapic_routing mp_ioapic_routing[MAX_IO_APICS];
@@ -820,7 +817,7 @@ static int mp_find_ioapic(int gsi)
return -1;
}
-static u8 uniq_ioapic_id(u8 id)
+static u8 __init uniq_ioapic_id(u8 id)
{
#ifdef CONFIG_X86_32
if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
@@ -909,14 +906,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
intsrc.mpc_dstirq = pin; /* INTIN# */
- Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
- intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
- (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
- intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
-
- mp_irqs[mp_irq_entries] = intsrc;
- if (++mp_irq_entries == MAX_IRQ_SOURCES)
- panic("Max # of irq sources exceeded!\n");
+ MP_intsrc_info(&intsrc);
}
int es7000_plat;
@@ -985,23 +975,14 @@ void __init mp_config_acpi_legacy_irqs(void)
intsrc.mpc_srcbusirq = i; /* Identity mapped */
intsrc.mpc_dstirq = i;
- Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
- "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
- (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
- intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
- intsrc.mpc_dstirq);
-
- mp_irqs[mp_irq_entries] = intsrc;
- if (++mp_irq_entries == MAX_IRQ_SOURCES)
- panic("Max # of irq sources exceeded!\n");
+ MP_intsrc_info(&intsrc);
}
}
int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
- int ioapic = -1;
- int ioapic_pin = 0;
- int idx, bit = 0;
+ int ioapic;
+ int ioapic_pin;
#ifdef CONFIG_X86_32
#define MAX_GSI_NUM 4096
#define IRQ_COMPRESSION_START 64
@@ -1041,15 +1022,13 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
* with redundant pin->gsi mappings (but unique PCI devices);
* we only program the IOAPIC on the first.
*/
- bit = ioapic_pin % 32;
- idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
- if (idx > 3) {
+ if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
printk(KERN_ERR "Invalid reference to IOAPIC pin "
"%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
ioapic_pin);
return gsi;
}
- if ((1 << bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
+ if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) {
Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
#ifdef CONFIG_X86_32
@@ -1059,7 +1038,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
#endif
}
- mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1 << bit);
+ set_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed);
#ifdef CONFIG_X86_32
/*
* For GSI >= 64, use IRQ compression
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 2edee22e9c3..e28ec497e14 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -43,6 +43,7 @@
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/rio.h>
+#include <asm/bios_ebda.h>
#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
int use_calgary __read_mostly = 1;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3004d716539..67e9b4a1e89 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -4,6 +4,8 @@
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/pm.h>
struct kmem_cache *task_xstate_cachep;
@@ -42,3 +44,118 @@ void arch_task_cache_init(void)
__alignof__(union thread_xstate),
SLAB_PANIC, NULL);
}
+
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpu_idle_wait - Ensure that all CPUs discard the old pm_idle value and
+ * pick up the new one. Required when changing the pm_idle handler on SMP
+ * systems.
+ *
+ * The caller must have changed pm_idle to the new value before the call.
+ * The old pm_idle value will not be used by any CPU after this function
+ * returns.
+ */
+void cpu_idle_wait(void)
+{
+ smp_mb();
+ /* kick all the CPUs so that they exit out of pm_idle */
+ smp_call_function(do_nothing, NULL, 0, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
+{
+ if (!need_resched()) {
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __mwait(ax, cx);
+ }
+}
+
+/* Default MONITOR/MWAIT with no hints, used for default C1 state */
+static void mwait_idle(void)
+{
+ if (!need_resched()) {
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __sti_mwait(0, 0);
+ else
+ local_irq_enable();
+ } else
+ local_irq_enable();
+}
+
+
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+ if (force_mwait)
+ return 1;
+ /* Any C1 states supported? */
+ return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+ local_irq_enable();
+ cpu_relax();
+}
+
+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+{
+ static int selected;
+
+ if (selected)
+ return;
+#ifdef CONFIG_X86_SMP
+ if (pm_idle == poll_idle && smp_num_siblings > 1) {
+ printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
+ " performance may degrade.\n");
+ }
+#endif
+ if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
+ /*
+ * Skip if setup has overridden idle.
+ * One CPU supports mwait => all CPUs support mwait.
+ */
+ if (!pm_idle) {
+ printk(KERN_INFO "using mwait in idle threads.\n");
+ pm_idle = mwait_idle;
+ }
+ }
+ selected = 1;
+}
+
+static int __init idle_setup(char *str)
+{
+ if (!strcmp(str, "poll")) {
+ printk("using polling idle threads.\n");
+ pm_idle = poll_idle;
+ } else if (!strcmp(str, "mwait"))
+ force_mwait = 1;
+ else
+ return -1;
+
+ boot_option_idle_override = 1;
+ return 0;
+}
+early_param("idle", idle_setup);
+
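
Editor's note: mwait_usable() above gates MWAIT idle on CPUID leaf 5, whose EDX bits 7:4 report the number of MWAIT sub-states available for C1. A userspace sketch of the same check (assuming GCC/Clang's <cpuid.h>; this is not the kernel helper):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int c1_substates;

	if (!__get_cpuid(5, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 5 (MONITOR/MWAIT) not available");
		return 1;
	}
	/* EDX[7:4] = number of C1 sub-states reachable via MWAIT. */
	c1_substates = (edx >> 4) & 0xf;
	printf("C1 MWAIT sub-states: %u -> mwait %susable\n",
	       c1_substates, c1_substates > 0 ? "" : "not ");
	return 0;
}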
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 77de848bd1f..f8476dfbb60 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -111,12 +111,10 @@ void default_idle(void)
*/
smp_mb();
- local_irq_disable();
- if (!need_resched()) {
+ if (!need_resched())
safe_halt(); /* enables interrupts racelessly */
- local_irq_disable();
- }
- local_irq_enable();
+ else
+ local_irq_enable();
current_thread_info()->status |= TS_POLLING;
} else {
local_irq_enable();
@@ -128,17 +126,6 @@ void default_idle(void)
EXPORT_SYMBOL(default_idle);
#endif
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
- local_irq_enable();
- cpu_relax();
-}
-
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
@@ -196,6 +183,7 @@ void cpu_idle(void)
if (cpu_is_offline(cpu))
play_dead();
+ local_irq_disable();
__get_cpu_var(irq_stat).idle_timestamp = jiffies;
idle();
}
@@ -206,104 +194,6 @@ void cpu_idle(void)
}
}
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
- smp_mb();
- /* kick all the CPUs so that they exit out of pm_idle */
- smp_call_function(do_nothing, NULL, 0, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
- if (!need_resched()) {
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __sti_mwait(ax, cx);
- else
- local_irq_enable();
- } else
- local_irq_enable();
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
- local_irq_enable();
- mwait_idle_with_hints(0, 0);
-}
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
- if (force_mwait)
- return 1;
- /* Any C1 states supported? */
- return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
- static int selected;
-
- if (selected)
- return;
-#ifdef CONFIG_X86_SMP
- if (pm_idle == poll_idle && smp_num_siblings > 1) {
- printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
- " performance may degrade.\n");
- }
-#endif
- if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
- /*
- * Skip, if setup has overridden idle.
- * One CPU supports mwait => All CPUs supports mwait
- */
- if (!pm_idle) {
- printk(KERN_INFO "using mwait in idle threads.\n");
- pm_idle = mwait_idle;
- }
- }
- selected = 1;
-}
-
-static int __init idle_setup(char *str)
-{
- if (!strcmp(str, "poll")) {
- printk("using polling idle threads.\n");
- pm_idle = poll_idle;
- } else if (!strcmp(str, "mwait"))
- force_mwait = 1;
- else
- return -1;
-
- boot_option_idle_override = 1;
- return 0;
-}
-early_param("idle", idle_setup);
-
void __show_registers(struct pt_regs *regs, int all)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 131c2ee7ac5..e2319f39988 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -106,26 +106,13 @@ void default_idle(void)
* test NEED_RESCHED:
*/
smp_mb();
- local_irq_disable();
- if (!need_resched()) {
+ if (!need_resched())
safe_halt(); /* enables interrupts racelessly */
- local_irq_disable();
- }
- local_irq_enable();
+ else
+ local_irq_enable();
current_thread_info()->status |= TS_POLLING;
}
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
- local_irq_enable();
- cpu_relax();
-}
-
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);
@@ -192,110 +179,6 @@ void cpu_idle(void)
}
}
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
- smp_mb();
- /* kick all the CPUs so that they exit out of pm_idle */
- smp_call_function(do_nothing, NULL, 0, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
- if (!need_resched()) {
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(ax, cx);
- }
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
- if (!need_resched()) {
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __sti_mwait(0, 0);
- else
- local_irq_enable();
- } else {
- local_irq_enable();
- }
-}
-
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
- if (force_mwait)
- return 1;
- /* Any C1 states supported? */
- return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
- static int selected;
-
- if (selected)
- return;
-#ifdef CONFIG_X86_SMP
- if (pm_idle == poll_idle && smp_num_siblings > 1) {
- printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
- " performance may degrade.\n");
- }
-#endif
- if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
- /*
- * Skip, if setup has overridden idle.
- * One CPU supports mwait => All CPUs supports mwait
- */
- if (!pm_idle) {
- printk(KERN_INFO "using mwait in idle threads.\n");
- pm_idle = mwait_idle;
- }
- }
- selected = 1;
-}
-
-static int __init idle_setup(char *str)
-{
- if (!strcmp(str, "poll")) {
- printk("using polling idle threads.\n");
- pm_idle = poll_idle;
- } else if (!strcmp(str, "mwait"))
- force_mwait = 1;
- else
- return -1;
-
- boot_option_idle_override = 1;
- return 0;
-}
-early_param("idle", idle_setup);
-
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 559c1b02741..fb03ef380f0 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1207,97 +1207,16 @@ static int genregs32_set(struct task_struct *target,
return ret;
}
-static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t caddr, compat_ulong_t cdata)
{
- siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
- compat_siginfo_t __user *si32 = compat_ptr(data);
- siginfo_t ssi;
- int ret;
-
- if (request == PTRACE_SETSIGINFO) {
- memset(&ssi, 0, sizeof(siginfo_t));
- ret = copy_siginfo_from_user32(&ssi, si32);
- if (ret)
- return ret;
- if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
- return -EFAULT;
- }
- ret = sys_ptrace(request, pid, addr, (unsigned long)si);
- if (ret)
- return ret;
- if (request == PTRACE_GETSIGINFO) {
- if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
- return -EFAULT;
- ret = copy_siginfo_to_user32(si32, &ssi);
- }
- return ret;
-}
-
-asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
-{
- struct task_struct *child;
- struct pt_regs *childregs;
+ unsigned long addr = caddr;
+ unsigned long data = cdata;
void __user *datap = compat_ptr(data);
int ret;
__u32 val;
switch (request) {
- case PTRACE_TRACEME:
- case PTRACE_ATTACH:
- case PTRACE_KILL:
- case PTRACE_CONT:
- case PTRACE_SINGLESTEP:
- case PTRACE_SINGLEBLOCK:
- case PTRACE_DETACH:
- case PTRACE_SYSCALL:
- case PTRACE_OLDSETOPTIONS:
- case PTRACE_SETOPTIONS:
- case PTRACE_SET_THREAD_AREA:
- case PTRACE_GET_THREAD_AREA:
-#ifdef X86_BTS
- case PTRACE_BTS_CONFIG:
- case PTRACE_BTS_STATUS:
- case PTRACE_BTS_SIZE:
- case PTRACE_BTS_GET:
- case PTRACE_BTS_CLEAR:
- case PTRACE_BTS_DRAIN:
-#endif
- return sys_ptrace(request, pid, addr, data);
-
- default:
- return -EINVAL;
-
- case PTRACE_PEEKTEXT:
- case PTRACE_PEEKDATA:
- case PTRACE_POKEDATA:
- case PTRACE_POKETEXT:
- case PTRACE_POKEUSR:
- case PTRACE_PEEKUSR:
- case PTRACE_GETREGS:
- case PTRACE_SETREGS:
- case PTRACE_SETFPREGS:
- case PTRACE_GETFPREGS:
- case PTRACE_SETFPXREGS:
- case PTRACE_GETFPXREGS:
- case PTRACE_GETEVENTMSG:
- break;
-
- case PTRACE_SETSIGINFO:
- case PTRACE_GETSIGINFO:
- return ptrace32_siginfo(request, pid, addr, data);
- }
-
- child = ptrace_get_task_struct(pid);
- if (IS_ERR(child))
- return PTR_ERR(child);
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out;
-
- childregs = task_pt_regs(child);
-
- switch (request) {
case PTRACE_PEEKUSR:
ret = getreg32(child, addr, &val);
if (ret == 0)
@@ -1343,12 +1262,14 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
sizeof(struct user32_fxsr_struct),
datap);
+ case PTRACE_GET_THREAD_AREA:
+ case PTRACE_SET_THREAD_AREA:
+ return arch_ptrace(child, request, addr, data);
+
default:
return compat_ptrace_request(child, request, addr, data);
}
- out:
- put_task_struct(child);
return ret;
}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 1791a751a77..a4a838306b2 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -399,7 +399,7 @@ static void native_machine_emergency_restart(void)
}
}
-static void native_machine_shutdown(void)
+void native_machine_shutdown(void)
{
/* Stop the cpus and apics */
#ifdef CONFIG_SMP
@@ -470,7 +470,10 @@ struct machine_ops machine_ops = {
.shutdown = native_machine_shutdown,
.emergency_restart = native_machine_emergency_restart,
.restart = native_machine_restart,
- .halt = native_machine_halt
+ .halt = native_machine_halt,
+#ifdef CONFIG_KEXEC
+ .crash_shutdown = native_machine_crash_shutdown,
+#endif
};
void machine_power_off(void)
@@ -498,3 +501,9 @@ void machine_halt(void)
machine_ops.halt();
}
+#ifdef CONFIG_KEXEC
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ machine_ops.crash_shutdown(regs);
+}
+#endif
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 455d3c80960..2283422af79 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -47,6 +47,7 @@
#include <linux/pfn.h>
#include <linux/pci.h>
#include <linux/init_ohci1394_dma.h>
+#include <linux/kvm_para.h>
#include <video/edid.h>
@@ -389,7 +390,6 @@ unsigned long __init find_max_low_pfn(void)
return max_low_pfn;
}
-#define BIOS_EBDA_SEGMENT 0x40E
#define BIOS_LOWMEM_KILOBYTES 0x413
/*
@@ -420,8 +420,7 @@ static void __init reserve_ebda_region(void)
lowmem <<= 10;
/* start of EBDA area */
- ebda_addr = *(unsigned short *)__va(BIOS_EBDA_SEGMENT);
- ebda_addr <<= 4;
+ ebda_addr = get_bios_ebda();
/* Fixup: bios puts an EBDA in the top 64K segment */
/* of conventional memory, but does not adjust lowmem. */
@@ -822,6 +821,10 @@ void __init setup_arch(char **cmdline_p)
max_low_pfn = setup_memory();
+#ifdef CONFIG_KVM_CLOCK
+ kvmclock_init();
+#endif
+
#ifdef CONFIG_VMI
/*
* Must be after max_low_pfn is determined, and before kernel
@@ -829,6 +832,7 @@ void __init setup_arch(char **cmdline_p)
*/
vmi_init();
#endif
+ kvm_guest_init();
/*
* NOTE: before this point _nobody_ is allowed to allocate
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index c2ec3dcb6b9..a94fb959a87 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -42,6 +42,7 @@
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/init_ohci1394_dma.h>
+#include <linux/kvm_para.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
@@ -116,7 +117,7 @@ extern int root_mountflags;
char __initdata command_line[COMMAND_LINE_SIZE];
-struct resource standard_io_resources[] = {
+static struct resource standard_io_resources[] = {
{ .name = "dma1", .start = 0x00, .end = 0x1f,
.flags = IORESOURCE_BUSY | IORESOURCE_IO },
{ .name = "pic1", .start = 0x20, .end = 0x21,
@@ -190,6 +191,7 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
e820_register_active_regions(0, start_pfn, end_pfn);
free_bootmem_with_active_regions(0, end_pfn);
+ early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}
#endif
@@ -264,6 +266,28 @@ void __attribute__((weak)) __init memory_setup(void)
machine_specific_memory_setup();
}
+static void __init parse_setup_data(void)
+{
+ struct setup_data *data;
+ unsigned long pa_data;
+
+ if (boot_params.hdr.version < 0x0209)
+ return;
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = early_ioremap(pa_data, PAGE_SIZE);
+ switch (data->type) {
+ default:
+ break;
+ }
+#ifndef CONFIG_DEBUG_BOOT_PARAMS
+ free_early(pa_data, pa_data+sizeof(*data)+data->len);
+#endif
+ pa_data = data->next;
+ early_iounmap(data, PAGE_SIZE);
+ }
+}
+
/*
* setup_arch - architecture-specific boot-time initializations
*
@@ -316,6 +340,8 @@ void __init setup_arch(char **cmdline_p)
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
+ parse_setup_data();
+
parse_early_param();
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
@@ -359,6 +385,10 @@ void __init setup_arch(char **cmdline_p)
io_delay_init();
+#ifdef CONFIG_KVM_CLOCK
+ kvmclock_init();
+#endif
+
#ifdef CONFIG_SMP
/* setup to use the early static init tables during kernel startup */
x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
@@ -397,8 +427,6 @@ void __init setup_arch(char **cmdline_p)
contig_initmem_init(0, end_pfn);
#endif
- early_res_to_bootmem();
-
dma32_reserve_bootmem();
#ifdef CONFIG_ACPI_SLEEP
@@ -465,6 +493,8 @@ void __init setup_arch(char **cmdline_p)
init_apic_mappings();
ioapic_init_mappings();
+ kvm_guest_init();
+
/*
* We trust e820 completely. No explicit ROM probing in memory.
*/
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index f1b11793083..8e05e7f7bd4 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -413,16 +413,6 @@ setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
regs->ss = __USER_DS;
regs->cs = __USER_CS;
- /*
- * Clear TF when entering the signal handler, but
- * notify any tracer that was single-stepping it.
- * The tracer may want to single-step inside the
- * handler too.
- */
- regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
- if (test_thread_flag(TIF_SINGLESTEP))
- ptrace_notify(SIGTRAP);
-
return 0;
give_sigsegv:
@@ -501,16 +491,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->ss = __USER_DS;
regs->cs = __USER_CS;
- /*
- * Clear TF when entering the signal handler, but
- * notify any tracer that was single-stepping it.
- * The tracer may want to single-step inside the
- * handler too.
- */
- regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
- if (test_thread_flag(TIF_SINGLESTEP))
- ptrace_notify(SIGTRAP);
-
return 0;
give_sigsegv:
@@ -566,6 +546,21 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
if (ret)
return ret;
+ /*
+ * Clear the direction flag as per the ABI for function entry.
+ */
+ regs->flags &= ~X86_EFLAGS_DF;
+
+ /*
+ * Clear TF when entering the signal handler, but
+ * notify any tracer that was single-stepping it.
+ * The tracer may want to single-step inside the
+ * handler too.
+ */
+ regs->flags &= ~X86_EFLAGS_TF;
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 827179c5b32..ccb2a4560c2 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -285,14 +285,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
even if the handler happens to be interrupting 32-bit code. */
regs->cs = __USER_CS;
- /* This, by contrast, has nothing to do with segment registers -
- see include/asm-x86_64/uaccess.h for details. */
- set_fs(USER_DS);
-
- regs->flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_DF);
- if (test_thread_flag(TIF_SINGLESTEP))
- ptrace_notify(SIGTRAP);
-
return 0;
give_sigsegv:
@@ -380,6 +372,28 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
ret = setup_rt_frame(sig, ka, info, oldset, regs);
if (ret == 0) {
+ /*
+ * This has nothing to do with segment registers,
+ * despite the name. This magic affects uaccess.h
+ * macros' behavior. Reset it to the normal setting.
+ */
+ set_fs(USER_DS);
+
+ /*
+ * Clear the direction flag as per the ABI for function entry.
+ */
+ regs->flags &= ~X86_EFLAGS_DF;
+
+ /*
+ * Clear TF when entering the signal handler, but
+ * notify any tracer that was single-stepping it.
+ * The tracer may want to single-step inside the
+ * handler too.
+ */
+ regs->flags &= ~X86_EFLAGS_TF;
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index eef79e84145..04c662ba18f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1058,7 +1058,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
check_tsc_sync_source(cpu);
local_irq_restore(flags);
- while (!cpu_isset(cpu, cpu_online_map)) {
+ while (!cpu_online(cpu)) {
cpu_relax();
touch_nmi_watchdog();
}
@@ -1168,7 +1168,7 @@ static void __init smp_cpu_index_default(void)
int i;
struct cpuinfo_x86 *c;
- for_each_cpu_mask(i, cpu_possible_map) {
+ for_each_possible_cpu(i) {
c = &cpu_data(i);
/* mark all to hotplug */
c->cpu_index = NR_CPUS;
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
index 6878a9c2df5..ae751094eba 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/summit_32.c
@@ -29,6 +29,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/io.h>
+#include <asm/bios_ebda.h>
#include <asm/mach-summit/mach_mpparse.h>
static struct rio_table_hdr *rio_table_hdr __initdata;
@@ -140,8 +141,8 @@ void __init setup_summit(void)
int i, next_wpeg, next_bus = 0;
/* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */
- ptr = *(unsigned short *)phys_to_virt(0x40Eul);
- ptr = (unsigned long)phys_to_virt(ptr << 4);
+ ptr = get_bios_ebda();
+ ptr = (unsigned long)phys_to_virt(ptr);
rio_table_hdr = NULL;
offset = 0x180;
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index df224a8774c..a1f07d79320 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -195,9 +195,9 @@ static int __cpuinit init_smp_flush(void)
{
int i;
- for_each_cpu_mask(i, cpu_possible_map) {
+ for_each_possible_cpu(i)
spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
- }
+
return 0;
}
core_initcall(init_smp_flush);
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
index 64580679861..d8ccc3c6552 100644
--- a/arch/x86/kernel/trampoline_32.S
+++ b/arch/x86/kernel/trampoline_32.S
@@ -33,7 +33,7 @@
/* We can free up trampoline after bootup if cpu hotplug is not supported. */
#ifndef CONFIG_HOTPLUG_CPU
-.section ".init.data","aw",@progbits
+.section ".cpuinit.data","aw",@progbits
#else
.section .rodata,"a",@progbits
#endif
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 471e694d671..bde6f63e15d 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -602,7 +602,7 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
-DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
+DO_ERROR_INFO(32, SIGILL, "iret exception", iret_error, ILL_BADSTK, 0, 1)
void __kprobes do_general_protection(struct pt_regs *regs, long error_code)
{
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 41962e793c0..8d45fabc5f3 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -19,7 +19,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM && EXPERIMENTAL
+ depends on HAVE_KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
---help---
@@ -50,6 +50,17 @@ config KVM_AMD
Provides support for KVM on AMD processors equipped with the AMD-V
(SVM) extensions.
+config KVM_TRACE
+ bool "KVM trace support"
+ depends on KVM && MARKERS && SYSFS
+ select RELAY
+ select DEBUG_FS
+ default n
+ ---help---
+ This option allows reading a trace of kvm-related events through
+ relayfs. Note the ABI is not considered stable and will be
+ modified in future updates.
+
# OK, it's a little counter-intuitive to do this, but it puts it neatly under
# the virtualization menu.
source drivers/lguest/Kconfig
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index ffdd0b31078..c97d35c218d 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -3,10 +3,14 @@
#
common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+ifeq ($(CONFIG_KVM_TRACE),y)
+common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
+endif
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
-kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o
+kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o \
+ i8254.o
obj-$(CONFIG_KVM) += kvm.o
kvm-intel-objs = vmx.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
new file mode 100644
index 00000000000..361e3161127
--- /dev/null
+++ b/arch/x86/kvm/i8254.c
@@ -0,0 +1,611 @@
+/*
+ * 8253/8254 interval timer emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2006 Intel Corporation
+ * Copyright (c) 2007 Keir Fraser, XenSource Inc
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Authors:
+ * Sheng Yang <sheng.yang@intel.com>
+ * Based on QEMU and Xen.
+ */
+
+#include <linux/kvm_host.h>
+
+#include "irq.h"
+#include "i8254.h"
+
+#ifndef CONFIG_X86_64
+#define mod_64(x, y) ((x) - (y) * div64_64(x, y))
+#else
+#define mod_64(x, y) ((x) % (y))
+#endif
+
+#define RW_STATE_LSB 1
+#define RW_STATE_MSB 2
+#define RW_STATE_WORD0 3
+#define RW_STATE_WORD1 4
+
+/* Compute with 96 bit intermediate result: (a*b)/c */
+static u64 muldiv64(u64 a, u32 b, u32 c)
+{
+ union {
+ u64 ll;
+ struct {
+ u32 low, high;
+ } l;
+ } u, res;
+ u64 rl, rh;
+
+ u.ll = a;
+ rl = (u64)u.l.low * (u64)b;
+ rh = (u64)u.l.high * (u64)b;
+ rh += (rl >> 32);
+ res.l.high = div64_64(rh, c);
+ res.l.low = div64_64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
+ return res.ll;
+}
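+
+/*
+ * Editor's sketch: with a = 10^12 ticks, b = NSEC_PER_SEC and
+ * c = KVM_PIT_FREQ, a plain 64-bit a*b would overflow; splitting 'a'
+ * into 32-bit halves keeps the intermediate result within 96 bits
+ * before the divide.
+ */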
+
+static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
+{
+ struct kvm_kpit_channel_state *c =
+ &kvm->arch.vpit->pit_state.channels[channel];
+
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ switch (c->mode) {
+ default:
+ case 0:
+ case 4:
+ /* XXX: just disable/enable counting */
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 5:
+ /* Restart counting on rising edge. */
+ if (c->gate < val)
+ c->count_load_time = ktime_get();
+ break;
+ }
+
+ c->gate = val;
+}
+
+int pit_get_gate(struct kvm *kvm, int channel)
+{
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ return kvm->arch.vpit->pit_state.channels[channel].gate;
+}
+
+static int pit_get_count(struct kvm *kvm, int channel)
+{
+ struct kvm_kpit_channel_state *c =
+ &kvm->arch.vpit->pit_state.channels[channel];
+ s64 d, t;
+ int counter;
+
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
+ d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+
+ switch (c->mode) {
+ case 0:
+ case 1:
+ case 4:
+ case 5:
+ counter = (c->count - d) & 0xffff;
+ break;
+ case 3:
+ /* XXX: may be incorrect for odd counts */
+ counter = c->count - (mod_64((2 * d), c->count));
+ break;
+ default:
+ counter = c->count - mod_64(d, c->count);
+ break;
+ }
+ return counter;
+}
+
+static int pit_get_out(struct kvm *kvm, int channel)
+{
+ struct kvm_kpit_channel_state *c =
+ &kvm->arch.vpit->pit_state.channels[channel];
+ s64 d, t;
+ int out;
+
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
+ d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+
+ switch (c->mode) {
+ default:
+ case 0:
+ out = (d >= c->count);
+ break;
+ case 1:
+ out = (d < c->count);
+ break;
+ case 2:
+ out = ((mod_64(d, c->count) == 0) && (d != 0));
+ break;
+ case 3:
+ out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
+ break;
+ case 4:
+ case 5:
+ out = (d == c->count);
+ break;
+ }
+
+ return out;
+}
+
+static void pit_latch_count(struct kvm *kvm, int channel)
+{
+ struct kvm_kpit_channel_state *c =
+ &kvm->arch.vpit->pit_state.channels[channel];
+
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ if (!c->count_latched) {
+ c->latched_count = pit_get_count(kvm, channel);
+ c->count_latched = c->rw_mode;
+ }
+}
+
+static void pit_latch_status(struct kvm *kvm, int channel)
+{
+ struct kvm_kpit_channel_state *c =
+ &kvm->arch.vpit->pit_state.channels[channel];
+
+ WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+ if (!c->status_latched) {
+ /* TODO: Return NULL COUNT (bit 6). */
+ c->status = ((pit_get_out(kvm, channel) << 7) |
+ (c->rw_mode << 4) |
+ (c->mode << 1) |
+ c->bcd);
+ c->status_latched = 1;
+ }
+}
+
+int __pit_timer_fn(struct kvm_kpit_state *ps)
+{
+ struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
+ struct kvm_kpit_timer *pt = &ps->pit_timer;
+
+ atomic_inc(&pt->pending);
+ smp_mb__after_atomic_inc();
+ /* FIXME: handle case where the guest is in guest mode */
+ if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
+ vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ wake_up_interruptible(&vcpu0->wq);
+ }
+
+ pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
+ pt->scheduled = ktime_to_ns(pt->timer.expires);
+
+ return (pt->period == 0 ? 0 : 1);
+}
+
+int pit_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+
+ if (pit && vcpu->vcpu_id == 0)
+ return atomic_read(&pit->pit_state.pit_timer.pending);
+
+ return 0;
+}
+
+static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
+{
+ struct kvm_kpit_state *ps;
+ int restart_timer = 0;
+
+ ps = container_of(data, struct kvm_kpit_state, pit_timer.timer);
+
+ restart_timer = __pit_timer_fn(ps);
+
+ if (restart_timer)
+ return HRTIMER_RESTART;
+ else
+ return HRTIMER_NORESTART;
+}
+
+static void destroy_pit_timer(struct kvm_kpit_timer *pt)
+{
+ pr_debug("pit: execute del timer!\n");
+ hrtimer_cancel(&pt->timer);
+}
+
+static void create_pit_timer(struct kvm_kpit_timer *pt, u32 val, int is_period)
+{
+ s64 interval;
+
+ interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
+
+ pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);
+
+	/* TODO: the new value only takes effect after the counter is retriggered */
+ hrtimer_cancel(&pt->timer);
+ pt->period = (is_period == 0) ? 0 : interval;
+ pt->timer.function = pit_timer_fn;
+ atomic_set(&pt->pending, 0);
+
+ hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
+ HRTIMER_MODE_ABS);
+}
+
+static void pit_load_count(struct kvm *kvm, int channel, u32 val)
+{
+ struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+
+ WARN_ON(!mutex_is_locked(&ps->lock));
+
+ pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);
+
+ /*
+	 * Although the spec says the state of the 8254 is undefined after
+	 * power-up, some tricky OSes such as Windows XP depend on an IRQ0
+	 * interrupt when booting up, so set an initial rate for it here
+	 * rather than a specific count.
+ */
+ if (val == 0)
+ val = 0x10000;
+
+ ps->channels[channel].count_load_time = ktime_get();
+ ps->channels[channel].count = val;
+
+ if (channel != 0)
+ return;
+
+	/* Two timer types:
+	 * mode 1 is one-shot, mode 2 is periodic; otherwise delete the timer */
+ switch (ps->channels[0].mode) {
+ case 1:
+ create_pit_timer(&ps->pit_timer, val, 0);
+ break;
+ case 2:
+ create_pit_timer(&ps->pit_timer, val, 1);
+ break;
+ default:
+ destroy_pit_timer(&ps->pit_timer);
+ }
+}
+
+void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
+{
+ mutex_lock(&kvm->arch.vpit->pit_state.lock);
+ pit_load_count(kvm, channel, val);
+ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+}
+
+static void pit_ioport_write(struct kvm_io_device *this,
+ gpa_t addr, int len, const void *data)
+{
+ struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_kpit_state *pit_state = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
+ int channel, access;
+ struct kvm_kpit_channel_state *s;
+ u32 val = *(u32 *) data;
+
+ val &= 0xff;
+ addr &= KVM_PIT_CHANNEL_MASK;
+
+ mutex_lock(&pit_state->lock);
+
+ if (val != 0)
+ pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
+ (unsigned int)addr, len, val);
+
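+	/*
+	 * Offset 3 is the 8254 control word: bits 7:6 select the channel
+	 * (3 = read-back command), bits 5:4 the access mode and bits 3:1
+	 * the counter mode (editor's note).
+	 */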
+ if (addr == 3) {
+ channel = val >> 6;
+ if (channel == 3) {
+ /* Read-Back Command. */
+ for (channel = 0; channel < 3; channel++) {
+ s = &pit_state->channels[channel];
+ if (val & (2 << channel)) {
+ if (!(val & 0x20))
+ pit_latch_count(kvm, channel);
+ if (!(val & 0x10))
+ pit_latch_status(kvm, channel);
+ }
+ }
+ } else {
+ /* Select Counter <channel>. */
+ s = &pit_state->channels[channel];
+ access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
+ if (access == 0) {
+ pit_latch_count(kvm, channel);
+ } else {
+ s->rw_mode = access;
+ s->read_state = access;
+ s->write_state = access;
+ s->mode = (val >> 1) & 7;
+ if (s->mode > 5)
+ s->mode -= 4;
+ s->bcd = val & 1;
+ }
+ }
+ } else {
+ /* Write Count. */
+ s = &pit_state->channels[addr];
+ switch (s->write_state) {
+ default:
+ case RW_STATE_LSB:
+ pit_load_count(kvm, addr, val);
+ break;
+ case RW_STATE_MSB:
+ pit_load_count(kvm, addr, val << 8);
+ break;
+ case RW_STATE_WORD0:
+ s->write_latch = val;
+ s->write_state = RW_STATE_WORD1;
+ break;
+ case RW_STATE_WORD1:
+ pit_load_count(kvm, addr, s->write_latch | (val << 8));
+ s->write_state = RW_STATE_WORD0;
+ break;
+ }
+ }
+
+ mutex_unlock(&pit_state->lock);
+}
+
+static void pit_ioport_read(struct kvm_io_device *this,
+ gpa_t addr, int len, void *data)
+{
+ struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_kpit_state *pit_state = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
+ int ret, count;
+ struct kvm_kpit_channel_state *s;
+
+ addr &= KVM_PIT_CHANNEL_MASK;
+ s = &pit_state->channels[addr];
+
+ mutex_lock(&pit_state->lock);
+
+ if (s->status_latched) {
+ s->status_latched = 0;
+ ret = s->status;
+ } else if (s->count_latched) {
+ switch (s->count_latched) {
+ default:
+ case RW_STATE_LSB:
+ ret = s->latched_count & 0xff;
+ s->count_latched = 0;
+ break;
+ case RW_STATE_MSB:
+ ret = s->latched_count >> 8;
+ s->count_latched = 0;
+ break;
+ case RW_STATE_WORD0:
+ ret = s->latched_count & 0xff;
+ s->count_latched = RW_STATE_MSB;
+ break;
+ }
+ } else {
+ switch (s->read_state) {
+ default:
+ case RW_STATE_LSB:
+ count = pit_get_count(kvm, addr);
+ ret = count & 0xff;
+ break;
+ case RW_STATE_MSB:
+ count = pit_get_count(kvm, addr);
+ ret = (count >> 8) & 0xff;
+ break;
+ case RW_STATE_WORD0:
+ count = pit_get_count(kvm, addr);
+ ret = count & 0xff;
+ s->read_state = RW_STATE_WORD1;
+ break;
+ case RW_STATE_WORD1:
+ count = pit_get_count(kvm, addr);
+ ret = (count >> 8) & 0xff;
+ s->read_state = RW_STATE_WORD0;
+ break;
+ }
+ }
+
+ if (len > sizeof(ret))
+ len = sizeof(ret);
+ memcpy(data, (char *)&ret, len);
+
+ mutex_unlock(&pit_state->lock);
+}
+
+static int pit_in_range(struct kvm_io_device *this, gpa_t addr)
+{
+ return ((addr >= KVM_PIT_BASE_ADDRESS) &&
+ (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
+}
+
+static void speaker_ioport_write(struct kvm_io_device *this,
+ gpa_t addr, int len, const void *data)
+{
+ struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_kpit_state *pit_state = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
+ u32 val = *(u32 *) data;
+
+ mutex_lock(&pit_state->lock);
+ pit_state->speaker_data_on = (val >> 1) & 1;
+ pit_set_gate(kvm, 2, val & 1);
+ mutex_unlock(&pit_state->lock);
+}
+
+static void speaker_ioport_read(struct kvm_io_device *this,
+ gpa_t addr, int len, void *data)
+{
+ struct kvm_pit *pit = (struct kvm_pit *)this->private;
+ struct kvm_kpit_state *pit_state = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
+ unsigned int refresh_clock;
+ int ret;
+
+	/* The refresh clock toggles roughly every 15 us; we approximate
+	 * that as 2^14 ns. */
+ refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
+
+ mutex_lock(&pit_state->lock);
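+	/* port 0x61 layout: bit 0 = timer 2 gate, bit 1 = speaker data,
+	 * bit 4 = refresh clock, bit 5 = timer 2 output */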
+ ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
+ (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
+ if (len > sizeof(ret))
+ len = sizeof(ret);
+ memcpy(data, (char *)&ret, len);
+ mutex_unlock(&pit_state->lock);
+}
+
+static int speaker_in_range(struct kvm_io_device *this, gpa_t addr)
+{
+ return (addr == KVM_SPEAKER_BASE_ADDRESS);
+}
+
+void kvm_pit_reset(struct kvm_pit *pit)
+{
+ int i;
+ struct kvm_kpit_channel_state *c;
+
+ mutex_lock(&pit->pit_state.lock);
+ for (i = 0; i < 3; i++) {
+ c = &pit->pit_state.channels[i];
+ c->mode = 0xff;
+ c->gate = (i != 2);
+ pit_load_count(pit->kvm, i, 0);
+ }
+ mutex_unlock(&pit->pit_state.lock);
+
+ atomic_set(&pit->pit_state.pit_timer.pending, 0);
+ pit->pit_state.inject_pending = 1;
+}
+
+struct kvm_pit *kvm_create_pit(struct kvm *kvm)
+{
+ struct kvm_pit *pit;
+ struct kvm_kpit_state *pit_state;
+
+ pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
+ if (!pit)
+ return NULL;
+
+ mutex_init(&pit->pit_state.lock);
+ mutex_lock(&pit->pit_state.lock);
+
+ /* Initialize PIO device */
+ pit->dev.read = pit_ioport_read;
+ pit->dev.write = pit_ioport_write;
+ pit->dev.in_range = pit_in_range;
+ pit->dev.private = pit;
+ kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
+
+ pit->speaker_dev.read = speaker_ioport_read;
+ pit->speaker_dev.write = speaker_ioport_write;
+ pit->speaker_dev.in_range = speaker_in_range;
+ pit->speaker_dev.private = pit;
+ kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);
+
+ kvm->arch.vpit = pit;
+ pit->kvm = kvm;
+
+ pit_state = &pit->pit_state;
+ pit_state->pit = pit;
+ hrtimer_init(&pit_state->pit_timer.timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ mutex_unlock(&pit->pit_state.lock);
+
+ kvm_pit_reset(pit);
+
+ return pit;
+}
+
+void kvm_free_pit(struct kvm *kvm)
+{
+ struct hrtimer *timer;
+
+ if (kvm->arch.vpit) {
+ mutex_lock(&kvm->arch.vpit->pit_state.lock);
+ timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
+ hrtimer_cancel(timer);
+ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ kfree(kvm->arch.vpit);
+ }
+}
+
+void __inject_pit_timer_intr(struct kvm *kvm)
+{
+ mutex_lock(&kvm->lock);
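+	/* pulse IRQ0: raise then lower it so both the IOAPIC and the PIC
+	 * see a complete edge */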
+ kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1);
+ kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 0);
+ kvm_pic_set_irq(pic_irqchip(kvm), 0, 1);
+ kvm_pic_set_irq(pic_irqchip(kvm), 0, 0);
+ mutex_unlock(&kvm->lock);
+}
+
+void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_kpit_state *ps;
+
+ if (vcpu && pit) {
+ ps = &pit->pit_state;
+
+		/* Try to inject pending interrupts when:
+		 * 1. an interrupt is pending, and
+		 * 2. the last one was accepted, or has waited too long. */
+ if (atomic_read(&ps->pit_timer.pending) &&
+ (ps->inject_pending ||
+ (jiffies - ps->last_injected_time
+ >= KVM_MAX_PIT_INTR_INTERVAL))) {
+ ps->inject_pending = 0;
+ __inject_pit_timer_intr(kvm);
+ ps->last_injected_time = jiffies;
+ }
+ }
+}
+
+void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
+{
+ struct kvm_arch *arch = &vcpu->kvm->arch;
+ struct kvm_kpit_state *ps;
+
+ if (vcpu && arch->vpit) {
+ ps = &arch->vpit->pit_state;
+ if (atomic_read(&ps->pit_timer.pending) &&
+ (((arch->vpic->pics[0].imr & 1) == 0 &&
+ arch->vpic->pics[0].irq_base == vec) ||
+ (arch->vioapic->redirtbl[0].fields.vector == vec &&
+ arch->vioapic->redirtbl[0].fields.mask != 1))) {
+ ps->inject_pending = 1;
+ atomic_dec(&ps->pit_timer.pending);
+ ps->channels[0].count_load_time = ktime_get();
+ }
+ }
+}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
new file mode 100644
index 00000000000..db25c2a6c8c
--- /dev/null
+++ b/arch/x86/kvm/i8254.h
@@ -0,0 +1,63 @@
+#ifndef __I8254_H
+#define __I8254_H
+
+#include "iodev.h"
+
+struct kvm_kpit_timer {
+ struct hrtimer timer;
+ int irq;
+ s64 period; /* unit: ns */
+ s64 scheduled;
+ ktime_t last_update;
+ atomic_t pending;
+};
+
+struct kvm_kpit_channel_state {
+ u32 count; /* can be 65536 */
+ u16 latched_count;
+ u8 count_latched;
+ u8 status_latched;
+ u8 status;
+ u8 read_state;
+ u8 write_state;
+ u8 write_latch;
+ u8 rw_mode;
+ u8 mode;
+ u8 bcd; /* not supported */
+ u8 gate; /* timer start */
+ ktime_t count_load_time;
+};
+
+struct kvm_kpit_state {
+ struct kvm_kpit_channel_state channels[3];
+ struct kvm_kpit_timer pit_timer;
+ u32 speaker_data_on;
+ struct mutex lock;
+ struct kvm_pit *pit;
+	bool inject_pending; /* whether to inject pending interrupts */
+ unsigned long last_injected_time;
+};
+
+struct kvm_pit {
+	unsigned long base_address;
+ struct kvm_io_device dev;
+ struct kvm_io_device speaker_dev;
+ struct kvm *kvm;
+ struct kvm_kpit_state pit_state;
+};
+
+#define KVM_PIT_BASE_ADDRESS 0x40
+#define KVM_SPEAKER_BASE_ADDRESS 0x61
+#define KVM_PIT_MEM_LENGTH 4
+#define KVM_PIT_FREQ 1193181
+#define KVM_MAX_PIT_INTR_INTERVAL  (HZ / 100)
+#define KVM_PIT_CHANNEL_MASK 0x3
+
+void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu);
+void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
+void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val);
+struct kvm_pit *kvm_create_pit(struct kvm *kvm);
+void kvm_free_pit(struct kvm *kvm);
+void kvm_pit_reset(struct kvm_pit *pit);
+
+#endif
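
To get a feel for the constants above, the count-to-period conversion that the timer setup performs reduces to the arithmetic below. This is a standalone userspace sketch; the kernel side goes through a 64-bit mul/div helper.

#include <stdio.h>
#include <stdint.h>

#define KVM_PIT_FREQ 1193181ULL
#define NSEC_PER_SEC 1000000000ULL

static uint64_t pit_count_to_ns(uint32_t count)
{
	/* period = count ticks of the ~1.19318 MHz clock, in nanoseconds */
	return count * NSEC_PER_SEC / KVM_PIT_FREQ;
}

int main(void)
{
	/* full-range count 0x10000 gives ~54.9 ms, the 18.2 Hz BIOS tick */
	printf("%llu ns\n", (unsigned long long)pit_count_to_ns(0x10000));
	return 0;
}
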
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index e5714759e97..ce1f583459b 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -23,6 +23,22 @@
#include <linux/kvm_host.h>
#include "irq.h"
+#include "i8254.h"
+
+/*
+ * Check whether there are pending timer events to be processed.
+ */
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ ret = pit_has_pending_timer(vcpu);
+ ret |= apic_has_pending_timer(vcpu);
+
+ return ret;
+}
+EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
/*
* check if there is pending interrupt without
@@ -66,6 +82,7 @@ EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
{
kvm_inject_apic_timer_irqs(vcpu);
+ kvm_inject_pit_timer_irqs(vcpu);
/* TODO: PIT, RTC etc. */
}
EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
@@ -73,6 +90,7 @@ EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
{
kvm_apic_timer_intr_post(vcpu, vec);
+ kvm_pit_timer_intr_post(vcpu, vec);
/* TODO: PIT, RTC etc. */
}
EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index fa5ed5d59b5..1802134b836 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -85,4 +85,7 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
+int pit_has_pending_timer(struct kvm_vcpu *vcpu);
+int apic_has_pending_timer(struct kvm_vcpu *vcpu);
+
#endif
diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
index ecdfe97e463..65ef0fc2c03 100644
--- a/arch/x86/kvm/kvm_svm.h
+++ b/arch/x86/kvm/kvm_svm.h
@@ -39,6 +39,8 @@ struct vcpu_svm {
unsigned long host_db_regs[NUM_DB_REGS];
unsigned long host_dr6;
unsigned long host_dr7;
+
+ u32 *msrpm;
};
#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 68a6b151193..57ac4e4c556 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -338,10 +338,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
} else
apic_clear_vector(vector, apic->regs + APIC_TMR);
- if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+ if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
kvm_vcpu_kick(vcpu);
- else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
- vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+ else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
}
@@ -362,11 +362,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
case APIC_DM_INIT:
if (level) {
- if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+ if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
printk(KERN_DEBUG
"INIT on a runnable vcpu %d\n",
vcpu->vcpu_id);
- vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
+ vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
kvm_vcpu_kick(vcpu);
} else {
printk(KERN_DEBUG
@@ -379,9 +379,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
case APIC_DM_STARTUP:
printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
vcpu->vcpu_id, vector);
- if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
+ if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
vcpu->arch.sipi_vector = vector;
- vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
+ vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
}
@@ -658,7 +658,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
PRIx64 ", "
"timer initial count 0x%x, period %lldns, "
- "expire @ 0x%016" PRIx64 ".\n", __FUNCTION__,
+ "expire @ 0x%016" PRIx64 ".\n", __func__,
APIC_BUS_CYCLE_NS, ktime_to_ns(now),
apic_get_reg(apic, APIC_TMICT),
apic->timer.period,
@@ -691,7 +691,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
/* too common printing */
if (offset != APIC_EOI)
apic_debug("%s: offset 0x%x with length 0x%x, and value is "
- "0x%x\n", __FUNCTION__, offset, len, val);
+ "0x%x\n", __func__, offset, len, val);
offset &= 0xff0;
@@ -822,6 +822,7 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
| (apic_get_reg(apic, APIC_TASKPRI) & 4));
}
+EXPORT_SYMBOL_GPL(kvm_lapic_set_tpr);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
@@ -869,7 +870,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic;
int i;
- apic_debug("%s\n", __FUNCTION__);
+ apic_debug("%s\n", __func__);
ASSERT(vcpu);
apic = vcpu->arch.apic;
@@ -907,7 +908,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
apic_update_ppr(apic);
apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
- "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
+ "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
vcpu, kvm_apic_id(apic),
vcpu->arch.apic_base, apic->base_address);
}
@@ -940,7 +941,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
atomic_inc(&apic->timer.pending);
if (waitqueue_active(q)) {
- apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+ apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
wake_up_interruptible(q);
}
if (apic_lvtt_period(apic)) {
@@ -952,6 +953,16 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
return result;
}
+int apic_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *lapic = vcpu->arch.apic;
+
+ if (lapic)
+ return atomic_read(&lapic->timer.pending);
+
+ return 0;
+}
+
static int __inject_apic_timer_irq(struct kvm_lapic *apic)
{
int vector;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e55af12e11b..2ad6f548167 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -27,11 +27,22 @@
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
+#include <linux/hugetlb.h>
+#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
+/*
+ * When set to true, this enables Two-Dimensional Paging: the hardware
+ * walks two page tables,
+ * 1. the guest-virtual to guest-physical table, and
+ * 2. for each guest-physical address touched in 1., the guest-physical
+ *    to host-physical table.
+ * If the hardware supports this, we don't need to do shadow paging.
+ */
+bool tdp_enabled = false;
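
Conceptually the two walks compose as in the sketch below (illustrative pseudo-C, not kernel code); with TDP the hardware performs both, so KVM only has to maintain the guest-physical to host-physical table.

#include <stdint.h>

typedef uint64_t gva_sk_t, gpa_sk_t, hpa_sk_t;	/* sketch-only types */

static hpa_sk_t tdp_translate(gva_sk_t gva,
			      gpa_sk_t (*guest_walk)(gva_sk_t),
			      hpa_sk_t (*nested_walk)(gpa_sk_t))
{
	gpa_sk_t gpa = guest_walk(gva);	/* walk 1: guest page tables */
	return nested_walk(gpa);	/* walk 2: gpa -> hpa */
}
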
+
#undef MMU_DEBUG
#undef AUDIT
@@ -101,8 +112,6 @@ static int dbg = 1;
#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52
-#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
-
#define VALID_PAGE(x) ((x) != INVALID_PAGE)
#define PT64_LEVEL_BITS 9
@@ -159,6 +168,13 @@ static int dbg = 1;
#define ACC_USER_MASK PT_USER_MASK
#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
+struct kvm_pv_mmu_op_buffer {
+ void *ptr;
+ unsigned len;
+ unsigned processed;
+ char buf[512] __aligned(sizeof(long));
+};
+
struct kvm_rmap_desc {
u64 *shadow_ptes[RMAP_EXT];
struct kvm_rmap_desc *more;
@@ -200,11 +216,15 @@ static int is_present_pte(unsigned long pte)
static int is_shadow_present_pte(u64 pte)
{
- pte &= ~PT_SHADOW_IO_MARK;
return pte != shadow_trap_nonpresent_pte
&& pte != shadow_notrap_nonpresent_pte;
}
+static int is_large_pte(u64 pte)
+{
+ return pte & PT_PAGE_SIZE_MASK;
+}
+
static int is_writeble_pte(unsigned long pte)
{
return pte & PT_WRITABLE_MASK;
@@ -215,14 +235,14 @@ static int is_dirty_pte(unsigned long pte)
return pte & PT_DIRTY_MASK;
}
-static int is_io_pte(unsigned long pte)
+static int is_rmap_pte(u64 pte)
{
- return pte & PT_SHADOW_IO_MARK;
+ return is_shadow_present_pte(pte);
}
-static int is_rmap_pte(u64 pte)
+static pfn_t spte_to_pfn(u64 pte)
{
- return is_shadow_present_pte(pte);
+ return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static gfn_t pse36_gfn_delta(u32 gpte)
@@ -349,16 +369,100 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
}
/*
+ * Return the pointer to the largepage write count for a given
+ * gfn, handling slots that are not large page aligned.
+ */
+static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
+{
+ unsigned long idx;
+
+ idx = (gfn / KVM_PAGES_PER_HPAGE) -
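+	/* e.g. with KVM_PAGES_PER_HPAGE == 512: base_gfn 0x400 and
+	 * gfn 0x7ff give idx = (0x7ff / 512) - (0x400 / 512) = 1, so all
+	 * 512 small pages of one huge page share a single write count */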
+ (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+ return &slot->lpage_info[idx].write_count;
+}
+
+static void account_shadowed(struct kvm *kvm, gfn_t gfn)
+{
+ int *write_count;
+
+ write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+ *write_count += 1;
+ WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
+}
+
+static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
+{
+ int *write_count;
+
+ write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+ *write_count -= 1;
+ WARN_ON(*write_count < 0);
+}
+
+static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
+{
+ struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+ int *largepage_idx;
+
+ if (slot) {
+ largepage_idx = slot_largepage_idx(gfn, slot);
+ return *largepage_idx;
+ }
+
+ return 1;
+}
+
+static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
+{
+ struct vm_area_struct *vma;
+ unsigned long addr;
+
+ addr = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(addr))
+ return 0;
+
+ vma = find_vma(current->mm, addr);
+ if (vma && is_vm_hugetlb_page(vma))
+ return 1;
+
+ return 0;
+}
+
+static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+{
+ struct kvm_memory_slot *slot;
+
+ if (has_wrprotected_page(vcpu->kvm, large_gfn))
+ return 0;
+
+ if (!host_largepage_backed(vcpu->kvm, large_gfn))
+ return 0;
+
+ slot = gfn_to_memslot(vcpu->kvm, large_gfn);
+ if (slot && slot->dirty_bitmap)
+ return 0;
+
+ return 1;
+}
+
+/*
* Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called
*/
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
struct kvm_memory_slot *slot;
+ unsigned long idx;
slot = gfn_to_memslot(kvm, gfn);
- return &slot->rmap[gfn - slot->base_gfn];
+ if (!lpage)
+ return &slot->rmap[gfn - slot->base_gfn];
+
+ idx = (gfn / KVM_PAGES_PER_HPAGE) -
+ (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+
+ return &slot->lpage_info[idx].rmap_pde;
}
/*
@@ -370,7 +474,7 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
* containing more mappings.
*/
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
struct kvm_mmu_page *sp;
struct kvm_rmap_desc *desc;
@@ -382,7 +486,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
gfn = unalias_gfn(vcpu->kvm, gfn);
sp = page_header(__pa(spte));
sp->gfns[spte - sp->spt] = gfn;
- rmapp = gfn_to_rmap(vcpu->kvm, gfn);
+ rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
if (!*rmapp) {
rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
*rmapp = (unsigned long)spte;
@@ -435,20 +539,21 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
struct kvm_rmap_desc *desc;
struct kvm_rmap_desc *prev_desc;
struct kvm_mmu_page *sp;
- struct page *page;
+ pfn_t pfn;
unsigned long *rmapp;
int i;
if (!is_rmap_pte(*spte))
return;
sp = page_header(__pa(spte));
- page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
- mark_page_accessed(page);
+ pfn = spte_to_pfn(*spte);
+ if (*spte & PT_ACCESSED_MASK)
+ kvm_set_pfn_accessed(pfn);
if (is_writeble_pte(*spte))
- kvm_release_page_dirty(page);
+ kvm_release_pfn_dirty(pfn);
else
- kvm_release_page_clean(page);
- rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
+ kvm_release_pfn_clean(pfn);
+ rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
if (!*rmapp) {
printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
BUG();
@@ -514,7 +619,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
int write_protected = 0;
gfn = unalias_gfn(kvm, gfn);
- rmapp = gfn_to_rmap(kvm, gfn);
+ rmapp = gfn_to_rmap(kvm, gfn, 0);
spte = rmap_next(kvm, rmapp, NULL);
while (spte) {
@@ -527,8 +632,35 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
}
spte = rmap_next(kvm, rmapp, spte);
}
+ if (write_protected) {
+ pfn_t pfn;
+
+ spte = rmap_next(kvm, rmapp, NULL);
+ pfn = spte_to_pfn(*spte);
+ kvm_set_pfn_dirty(pfn);
+ }
+
+ /* check for huge page mappings */
+ rmapp = gfn_to_rmap(kvm, gfn, 1);
+ spte = rmap_next(kvm, rmapp, NULL);
+ while (spte) {
+ BUG_ON(!spte);
+ BUG_ON(!(*spte & PT_PRESENT_MASK));
+		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) !=
+		       (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
+		pgprintk("rmap_write_protect(large): spte %p %llx %lx\n",
+			 spte, *spte, gfn);
+ if (is_writeble_pte(*spte)) {
+ rmap_remove(kvm, spte);
+ --kvm->stat.lpages;
+ set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+ write_protected = 1;
+ }
+ spte = rmap_next(kvm, rmapp, spte);
+ }
+
if (write_protected)
kvm_flush_remote_tlbs(kvm);
+
+ account_shadowed(kvm, gfn);
}
#ifdef MMU_DEBUG
@@ -538,8 +670,8 @@ static int is_empty_shadow_page(u64 *spt)
u64 *end;
for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
- if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
- printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
+ if (*pos != shadow_trap_nonpresent_pte) {
+ printk(KERN_ERR "%s: %p %llx\n", __func__,
pos, *pos);
return 0;
}
@@ -559,7 +691,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
- return gfn;
+ return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
@@ -662,13 +794,14 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
struct kvm_mmu_page *sp;
struct hlist_node *node;
- pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
- index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+ pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
+ index = kvm_page_table_hashfn(gfn);
bucket = &kvm->arch.mmu_page_hash[index];
hlist_for_each_entry(sp, node, bucket, hash_link)
- if (sp->gfn == gfn && !sp->role.metaphysical) {
+ if (sp->gfn == gfn && !sp->role.metaphysical
+ && !sp->role.invalid) {
pgprintk("%s: found role %x\n",
- __FUNCTION__, sp->role.word);
+ __func__, sp->role.word);
return sp;
}
return NULL;
@@ -699,27 +832,27 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
}
- pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
+ pgprintk("%s: looking gfn %lx role %x\n", __func__,
gfn, role.word);
- index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+ index = kvm_page_table_hashfn(gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
hlist_for_each_entry(sp, node, bucket, hash_link)
if (sp->gfn == gfn && sp->role.word == role.word) {
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
- pgprintk("%s: found\n", __FUNCTION__);
+ pgprintk("%s: found\n", __func__);
return sp;
}
++vcpu->kvm->stat.mmu_cache_miss;
sp = kvm_mmu_alloc_page(vcpu, parent_pte);
if (!sp)
return sp;
- pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
+ pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
sp->gfn = gfn;
sp->role = role;
hlist_add_head(&sp->hash_link, bucket);
- vcpu->arch.mmu.prefetch_page(vcpu, sp);
if (!metaphysical)
rmap_write_protect(vcpu->kvm, gfn);
+ vcpu->arch.mmu.prefetch_page(vcpu, sp);
return sp;
}
@@ -745,11 +878,17 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
ent = pt[i];
+ if (is_shadow_present_pte(ent)) {
+ if (!is_large_pte(ent)) {
+ ent &= PT64_BASE_ADDR_MASK;
+ mmu_page_remove_parent_pte(page_header(ent),
+ &pt[i]);
+ } else {
+ --kvm->stat.lpages;
+ rmap_remove(kvm, &pt[i]);
+ }
+ }
pt[i] = shadow_trap_nonpresent_pte;
- if (!is_shadow_present_pte(ent))
- continue;
- ent &= PT64_BASE_ADDR_MASK;
- mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
}
kvm_flush_remote_tlbs(kvm);
}
@@ -789,10 +928,15 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
}
kvm_mmu_page_unlink_children(kvm, sp);
if (!sp->root_count) {
+ if (!sp->role.metaphysical)
+ unaccount_shadowed(kvm, sp->gfn);
hlist_del(&sp->hash_link);
kvm_mmu_free_page(kvm, sp);
- } else
+ } else {
list_move(&sp->link, &kvm->arch.active_mmu_pages);
+ sp->role.invalid = 1;
+ kvm_reload_remote_mmus(kvm);
+ }
kvm_mmu_reset_last_pte_updated(kvm);
}
@@ -838,13 +982,13 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
struct hlist_node *node, *n;
int r;
- pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+ pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
r = 0;
- index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+ index = kvm_page_table_hashfn(gfn);
bucket = &kvm->arch.mmu_page_hash[index];
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
if (sp->gfn == gfn && !sp->role.metaphysical) {
- pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
+ pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
sp->role.word);
kvm_mmu_zap_page(kvm, sp);
r = 1;
@@ -857,7 +1001,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
struct kvm_mmu_page *sp;
while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
- pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
+ pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
kvm_mmu_zap_page(kvm, sp);
}
}
@@ -889,26 +1033,39 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
unsigned pt_access, unsigned pte_access,
int user_fault, int write_fault, int dirty,
- int *ptwrite, gfn_t gfn, struct page *page)
+ int *ptwrite, int largepage, gfn_t gfn,
+ pfn_t pfn, bool speculative)
{
u64 spte;
int was_rmapped = 0;
int was_writeble = is_writeble_pte(*shadow_pte);
- hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
pgprintk("%s: spte %llx access %x write_fault %d"
" user_fault %d gfn %lx\n",
- __FUNCTION__, *shadow_pte, pt_access,
+ __func__, *shadow_pte, pt_access,
write_fault, user_fault, gfn);
if (is_rmap_pte(*shadow_pte)) {
- if (host_pfn != page_to_pfn(page)) {
+ /*
+ * If we overwrite a PTE page pointer with a 2MB PMD, unlink
+ * the parent of the now unreachable PTE.
+ */
+ if (largepage && !is_large_pte(*shadow_pte)) {
+ struct kvm_mmu_page *child;
+ u64 pte = *shadow_pte;
+
+ child = page_header(pte & PT64_BASE_ADDR_MASK);
+ mmu_page_remove_parent_pte(child, shadow_pte);
+ } else if (pfn != spte_to_pfn(*shadow_pte)) {
pgprintk("hfn old %lx new %lx\n",
- host_pfn, page_to_pfn(page));
+ spte_to_pfn(*shadow_pte), pfn);
rmap_remove(vcpu->kvm, shadow_pte);
+ } else {
+ if (largepage)
+ was_rmapped = is_large_pte(*shadow_pte);
+ else
+ was_rmapped = 1;
}
- else
- was_rmapped = 1;
}
/*
@@ -917,6 +1074,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
* demand paging).
*/
spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+ if (!speculative)
+ pte_access |= PT_ACCESSED_MASK;
if (!dirty)
pte_access &= ~ACC_WRITE_MASK;
if (!(pte_access & ACC_EXEC_MASK))
@@ -925,15 +1084,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
spte |= PT_PRESENT_MASK;
if (pte_access & ACC_USER_MASK)
spte |= PT_USER_MASK;
+ if (largepage)
+ spte |= PT_PAGE_SIZE_MASK;
- if (is_error_page(page)) {
- set_shadow_pte(shadow_pte,
- shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
- kvm_release_page_clean(page);
- return;
- }
-
- spte |= page_to_phys(page);
+ spte |= (u64)pfn << PAGE_SHIFT;
if ((pte_access & ACC_WRITE_MASK)
|| (write_fault && !is_write_protection(vcpu) && !user_fault)) {
@@ -946,9 +1100,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
}
shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
- if (shadow) {
+ if (shadow ||
+ (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
pgprintk("%s: found shadow page for %lx, marking ro\n",
- __FUNCTION__, gfn);
+ __func__, gfn);
pte_access &= ~ACC_WRITE_MASK;
if (is_writeble_pte(spte)) {
spte &= ~PT_WRITABLE_MASK;
@@ -964,18 +1119,25 @@ unshadowed:
if (pte_access & ACC_WRITE_MASK)
mark_page_dirty(vcpu->kvm, gfn);
- pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+ pgprintk("%s: setting spte %llx\n", __func__, spte);
+ pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
+ (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
+ (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
set_shadow_pte(shadow_pte, spte);
+ if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
+ && (spte & PT_PRESENT_MASK))
+ ++vcpu->kvm->stat.lpages;
+
page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
if (!was_rmapped) {
- rmap_add(vcpu, shadow_pte, gfn);
+ rmap_add(vcpu, shadow_pte, gfn, largepage);
if (!is_rmap_pte(*shadow_pte))
- kvm_release_page_clean(page);
+ kvm_release_pfn_clean(pfn);
} else {
if (was_writeble)
- kvm_release_page_dirty(page);
+ kvm_release_pfn_dirty(pfn);
else
- kvm_release_page_clean(page);
+ kvm_release_pfn_clean(pfn);
}
if (!ptwrite || !*ptwrite)
vcpu->arch.last_pte_updated = shadow_pte;
@@ -985,10 +1147,10 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}
-static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
- gfn_t gfn, struct page *page)
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+ int largepage, gfn_t gfn, pfn_t pfn,
+ int level)
{
- int level = PT32E_ROOT_LEVEL;
hpa_t table_addr = vcpu->arch.mmu.root_hpa;
int pt_write = 0;
@@ -1001,8 +1163,14 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
if (level == 1) {
mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
- 0, write, 1, &pt_write, gfn, page);
- return pt_write || is_io_pte(table[index]);
+ 0, write, 1, &pt_write, 0, gfn, pfn, false);
+ return pt_write;
+ }
+
+ if (largepage && level == 2) {
+ mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
+ 0, write, 1, &pt_write, 1, gfn, pfn, false);
+ return pt_write;
}
if (table[index] == shadow_trap_nonpresent_pte) {
@@ -1016,7 +1184,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
1, ACC_ALL, &table[index]);
if (!new_table) {
pgprintk("nonpaging_map: ENOMEM\n");
- kvm_release_page_clean(page);
+ kvm_release_pfn_clean(pfn);
return -ENOMEM;
}
@@ -1030,21 +1198,30 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
int r;
-
- struct page *page;
-
- down_read(&vcpu->kvm->slots_lock);
+ int largepage = 0;
+ pfn_t pfn;
down_read(&current->mm->mmap_sem);
- page = gfn_to_page(vcpu->kvm, gfn);
+ if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
+ gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+ largepage = 1;
+ }
+
+ pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
+ /* mmio */
+ if (is_error_pfn(pfn)) {
+ kvm_release_pfn_clean(pfn);
+ return 1;
+ }
+
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
- r = __nonpaging_map(vcpu, v, write, gfn, page);
+ r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
+ PT32E_ROOT_LEVEL);
spin_unlock(&vcpu->kvm->mmu_lock);
- up_read(&vcpu->kvm->slots_lock);
return r;
}
@@ -1073,6 +1250,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
sp = page_header(root);
--sp->root_count;
+ if (!sp->root_count && sp->role.invalid)
+ kvm_mmu_zap_page(vcpu->kvm, sp);
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
spin_unlock(&vcpu->kvm->mmu_lock);
return;
@@ -1085,6 +1264,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
root &= PT64_BASE_ADDR_MASK;
sp = page_header(root);
--sp->root_count;
+ if (!sp->root_count && sp->role.invalid)
+ kvm_mmu_zap_page(vcpu->kvm, sp);
}
vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
}
@@ -1097,6 +1278,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
int i;
gfn_t root_gfn;
struct kvm_mmu_page *sp;
+ int metaphysical = 0;
root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
@@ -1105,14 +1287,20 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
hpa_t root = vcpu->arch.mmu.root_hpa;
ASSERT(!VALID_PAGE(root));
+ if (tdp_enabled)
+ metaphysical = 1;
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
- PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
+ PT64_ROOT_LEVEL, metaphysical,
+ ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
vcpu->arch.mmu.root_hpa = root;
return;
}
#endif
+ metaphysical = !is_paging(vcpu);
+ if (tdp_enabled)
+ metaphysical = 1;
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu.pae_root[i];
@@ -1126,7 +1314,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
} else if (vcpu->arch.mmu.root_level == 0)
root_gfn = 0;
sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
- PT32_ROOT_LEVEL, !is_paging(vcpu),
+ PT32_ROOT_LEVEL, metaphysical,
ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
@@ -1146,7 +1334,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
gfn_t gfn;
int r;
- pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
+ pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
@@ -1160,6 +1348,41 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
error_code & PFERR_WRITE_MASK, gfn);
}
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
+ u32 error_code)
+{
+ pfn_t pfn;
+ int r;
+ int largepage = 0;
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+
+ ASSERT(vcpu);
+ ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+
+ r = mmu_topup_memory_caches(vcpu);
+ if (r)
+ return r;
+
+ down_read(&current->mm->mmap_sem);
+ if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
+ gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+ largepage = 1;
+ }
+ pfn = gfn_to_pfn(vcpu->kvm, gfn);
+ up_read(&current->mm->mmap_sem);
+ if (is_error_pfn(pfn)) {
+ kvm_release_pfn_clean(pfn);
+ return 1;
+ }
+ spin_lock(&vcpu->kvm->mmu_lock);
+ kvm_mmu_free_some_pages(vcpu);
+ r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
+ largepage, gfn, pfn, TDP_ROOT_LEVEL);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
+ return r;
+}
+
static void nonpaging_free(struct kvm_vcpu *vcpu)
{
mmu_free_roots(vcpu);
@@ -1188,7 +1411,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
- pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
+ pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
mmu_free_roots(vcpu);
}
@@ -1253,7 +1476,35 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}
-static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu *context = &vcpu->arch.mmu;
+
+ context->new_cr3 = nonpaging_new_cr3;
+ context->page_fault = tdp_page_fault;
+ context->free = nonpaging_free;
+ context->prefetch_page = nonpaging_prefetch_page;
+ context->shadow_root_level = TDP_ROOT_LEVEL;
+ context->root_hpa = INVALID_PAGE;
+
+ if (!is_paging(vcpu)) {
+ context->gva_to_gpa = nonpaging_gva_to_gpa;
+ context->root_level = 0;
+ } else if (is_long_mode(vcpu)) {
+ context->gva_to_gpa = paging64_gva_to_gpa;
+ context->root_level = PT64_ROOT_LEVEL;
+ } else if (is_pae(vcpu)) {
+ context->gva_to_gpa = paging64_gva_to_gpa;
+ context->root_level = PT32E_ROOT_LEVEL;
+ } else {
+ context->gva_to_gpa = paging32_gva_to_gpa;
+ context->root_level = PT32_ROOT_LEVEL;
+ }
+
+ return 0;
+}
+
+static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1268,6 +1519,16 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
return paging32_init_context(vcpu);
}
+static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.update_pte.pfn = bad_pfn;
+
+ if (tdp_enabled)
+ return init_kvm_tdp_mmu(vcpu);
+ else
+ return init_kvm_softmmu(vcpu);
+}
+
static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
@@ -1316,7 +1577,8 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
pte = *spte;
if (is_shadow_present_pte(pte)) {
- if (sp->role.level == PT_PAGE_TABLE_LEVEL)
+ if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
+ is_large_pte(pte))
rmap_remove(vcpu->kvm, spte);
else {
child = page_header(pte & PT64_BASE_ADDR_MASK);
@@ -1324,24 +1586,26 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
}
}
set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+ if (is_large_pte(pte))
+ --vcpu->kvm->stat.lpages;
}
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp,
u64 *spte,
- const void *new, int bytes,
- int offset_in_pte)
+ const void *new)
{
- if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+ if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
+ && !vcpu->arch.update_pte.largepage) {
++vcpu->kvm->stat.mmu_pde_zapped;
return;
}
++vcpu->kvm->stat.mmu_pte_updated;
if (sp->role.glevels == PT32_ROOT_LEVEL)
- paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+ paging32_update_pte(vcpu, sp, spte, new);
else
- paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+ paging64_update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
@@ -1378,7 +1642,9 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gfn_t gfn;
int r;
u64 gpte = 0;
- struct page *page;
+ pfn_t pfn;
+
+ vcpu->arch.update_pte.largepage = 0;
if (bytes != 4 && bytes != 8)
return;
@@ -1408,11 +1674,19 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
down_read(&current->mm->mmap_sem);
- page = gfn_to_page(vcpu->kvm, gfn);
+ if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
+ gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+ vcpu->arch.update_pte.largepage = 1;
+ }
+ pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
+ if (is_error_pfn(pfn)) {
+ kvm_release_pfn_clean(pfn);
+ return;
+ }
vcpu->arch.update_pte.gfn = gfn;
- vcpu->arch.update_pte.page = page;
+ vcpu->arch.update_pte.pfn = pfn;
}
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1423,7 +1697,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
struct hlist_node *node, *n;
struct hlist_head *bucket;
unsigned index;
- u64 entry;
+ u64 entry, gentry;
u64 *spte;
unsigned offset = offset_in_page(gpa);
unsigned pte_size;
@@ -1433,8 +1707,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int level;
int flooded = 0;
int npte;
+ int r;
- pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+ pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
@@ -1450,7 +1725,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu->arch.last_pt_write_count = 1;
vcpu->arch.last_pte_updated = NULL;
}
- index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+ index = kvm_page_table_hashfn(gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
if (sp->gfn != gfn || sp->role.metaphysical)
@@ -1496,20 +1771,29 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
continue;
}
spte = &sp->spt[page_offset / sizeof(*spte)];
+ if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
+ gentry = 0;
+ r = kvm_read_guest_atomic(vcpu->kvm,
+ gpa & ~(u64)(pte_size - 1),
+ &gentry, pte_size);
+ new = (const void *)&gentry;
+ if (r < 0)
+ new = NULL;
+ }
while (npte--) {
entry = *spte;
mmu_pte_write_zap_pte(vcpu, sp, spte);
- mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
- page_offset & (pte_size - 1));
+ if (new)
+ mmu_pte_write_new_pte(vcpu, sp, spte, new);
mmu_pte_write_flush_tlb(vcpu, entry, *spte);
++spte;
}
}
kvm_mmu_audit(vcpu, "post pte write");
spin_unlock(&vcpu->kvm->mmu_lock);
- if (vcpu->arch.update_pte.page) {
- kvm_release_page_clean(vcpu->arch.update_pte.page);
- vcpu->arch.update_pte.page = NULL;
+ if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
+ kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
+ vcpu->arch.update_pte.pfn = bad_pfn;
}
}
@@ -1518,9 +1802,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
gpa_t gpa;
int r;
- down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
- up_read(&vcpu->kvm->slots_lock);
spin_lock(&vcpu->kvm->mmu_lock);
r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -1577,6 +1859,12 @@ out:
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
+void kvm_enable_tdp(void)
+{
+ tdp_enabled = true;
+}
+EXPORT_SYMBOL_GPL(kvm_enable_tdp);
+
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
@@ -1677,7 +1965,53 @@ void kvm_mmu_zap_all(struct kvm *kvm)
kvm_flush_remote_tlbs(kvm);
}
-void kvm_mmu_module_exit(void)
+void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+{
+ struct kvm_mmu_page *page;
+
+ page = container_of(kvm->arch.active_mmu_pages.prev,
+ struct kvm_mmu_page, link);
+ kvm_mmu_zap_page(kvm, page);
+}
+
+static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+ struct kvm *kvm;
+ struct kvm *kvm_freed = NULL;
+ int cache_count = 0;
+
+ spin_lock(&kvm_lock);
+
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ int npages;
+
+ spin_lock(&kvm->mmu_lock);
+ npages = kvm->arch.n_alloc_mmu_pages -
+ kvm->arch.n_free_mmu_pages;
+ cache_count += npages;
+ if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
+ kvm_mmu_remove_one_alloc_mmu_page(kvm);
+ cache_count--;
+ kvm_freed = kvm;
+ }
+ nr_to_scan--;
+
+ spin_unlock(&kvm->mmu_lock);
+ }
+ if (kvm_freed)
+ list_move_tail(&kvm_freed->vm_list, &vm_list);
+
+ spin_unlock(&kvm_lock);
+
+ return cache_count;
+}
+
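+/*
+ * Shrinker contract: a zero nr_to_scan only queries the cache size;
+ * otherwise mmu_shrink() frees at most one shadow page per call, taken
+ * from the first VM that has any, and rotates that VM to the tail of
+ * vm_list to spread the cost across VMs.
+ */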
+static struct shrinker mmu_shrinker = {
+ .shrink = mmu_shrink,
+ .seeks = DEFAULT_SEEKS * 10,
+};
+
+void mmu_destroy_caches(void)
{
if (pte_chain_cache)
kmem_cache_destroy(pte_chain_cache);
@@ -1687,6 +2021,12 @@ void kvm_mmu_module_exit(void)
kmem_cache_destroy(mmu_page_header_cache);
}
+void kvm_mmu_module_exit(void)
+{
+ mmu_destroy_caches();
+ unregister_shrinker(&mmu_shrinker);
+}
+
int kvm_mmu_module_init(void)
{
pte_chain_cache = kmem_cache_create("kvm_pte_chain",
@@ -1706,10 +2046,12 @@ int kvm_mmu_module_init(void)
if (!mmu_page_header_cache)
goto nomem;
+ register_shrinker(&mmu_shrinker);
+
return 0;
nomem:
- kvm_mmu_module_exit();
+ mmu_destroy_caches();
return -ENOMEM;
}
@@ -1732,6 +2074,127 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
return nr_mmu_pages;
}
+static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
+ unsigned len)
+{
+ if (len > buffer->len)
+ return NULL;
+ return buffer->ptr;
+}
+
+static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
+ unsigned len)
+{
+ void *ret;
+
+ ret = pv_mmu_peek_buffer(buffer, len);
+ if (!ret)
+ return ret;
+ buffer->ptr += len;
+ buffer->len -= len;
+ buffer->processed += len;
+ return ret;
+}
+
+static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
+ gpa_t addr, gpa_t value)
+{
+ int bytes = 8;
+ int r;
+
+ if (!is_long_mode(vcpu) && !is_pae(vcpu))
+ bytes = 4;
+
+ r = mmu_topup_memory_caches(vcpu);
+ if (r)
+ return r;
+
+ if (!emulator_write_phys(vcpu, addr, &value, bytes))
+ return -EFAULT;
+
+ return 1;
+}
+
+static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->tlb_flush(vcpu);
+ return 1;
+}
+
+static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
+{
+ spin_lock(&vcpu->kvm->mmu_lock);
+ mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ return 1;
+}
+
+static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
+ struct kvm_pv_mmu_op_buffer *buffer)
+{
+ struct kvm_mmu_op_header *header;
+
+ header = pv_mmu_peek_buffer(buffer, sizeof *header);
+ if (!header)
+ return 0;
+ switch (header->op) {
+ case KVM_MMU_OP_WRITE_PTE: {
+ struct kvm_mmu_op_write_pte *wpte;
+
+ wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
+ if (!wpte)
+ return 0;
+ return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
+ wpte->pte_val);
+ }
+ case KVM_MMU_OP_FLUSH_TLB: {
+ struct kvm_mmu_op_flush_tlb *ftlb;
+
+ ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
+ if (!ftlb)
+ return 0;
+ return kvm_pv_mmu_flush_tlb(vcpu);
+ }
+ case KVM_MMU_OP_RELEASE_PT: {
+ struct kvm_mmu_op_release_pt *rpt;
+
+ rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
+ if (!rpt)
+ return 0;
+ return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
+ }
+ default: return 0;
+ }
+}
+
+int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
+ gpa_t addr, unsigned long *ret)
+{
+ int r;
+ struct kvm_pv_mmu_op_buffer buffer;
+
+ buffer.ptr = buffer.buf;
+ buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
+ buffer.processed = 0;
+
+ r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
+ if (r)
+ goto out;
+
+ while (buffer.len) {
+ r = kvm_pv_mmu_op_one(vcpu, &buffer);
+ if (r < 0)
+ goto out;
+ if (r == 0)
+ break;
+ }
+
+ r = 1;
+out:
+ *ret = buffer.processed;
+ return r;
+}
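
For context, this is roughly how a paravirtualized guest could fill the buffer that the parser above walks. A hypothetical sketch: the record layouts below are inferred from the peek/read pattern (each op struct starting with the header), not copied from a header file.

#include <stdint.h>
#include <string.h>

/* assumed layouts mirroring the kernel's paravirt MMU ops */
struct kvm_mmu_op_header { uint32_t op; uint32_t pad; };
struct kvm_mmu_op_write_pte {
	struct kvm_mmu_op_header header;
	uint64_t pte_phys;
	uint64_t pte_val;
};
#define KVM_MMU_OP_WRITE_PTE 1

/* append one WRITE_PTE record; records sit back to back, so
 * kvm_pv_mmu_op_one() can peek each header and then consume the op */
static size_t emit_write_pte(void *buf, uint64_t pte_phys, uint64_t pte_val)
{
	struct kvm_mmu_op_write_pte wpte = {
		.header.op = KVM_MMU_OP_WRITE_PTE,
		.pte_phys  = pte_phys,
		.pte_val   = pte_val,
	};

	memcpy(buf, &wpte, sizeof(wpte));
	return sizeof(wpte);
}
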
+
#ifdef AUDIT
static const char *audit_msg;
@@ -1768,8 +2231,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
audit_mappings_page(vcpu, ent, va, level - 1);
} else {
gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
- struct page *page = gpa_to_page(vcpu, gpa);
- hpa_t hpa = page_to_phys(page);
+ hpa_t hpa = (hpa_t)gpa_to_pfn(vcpu, gpa) << PAGE_SHIFT;
if (is_shadow_present_pte(ent)
&& (ent & PT64_BASE_ADDR_MASK) != hpa)
@@ -1782,7 +2244,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
&& !is_error_hpa(hpa))
printk(KERN_ERR "audit: (%s) notrap shadow,"
" valid guest gva %lx\n", audit_msg, va);
- kvm_release_page_clean(page);
+ kvm_release_pfn_clean(pfn);
}
}
@@ -1867,7 +2329,7 @@ static void audit_rmap(struct kvm_vcpu *vcpu)
if (n_rmap != n_actual)
printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
- __FUNCTION__, audit_msg, n_rmap, n_actual);
+ __func__, audit_msg, n_rmap, n_actual);
}
static void audit_write_protection(struct kvm_vcpu *vcpu)
@@ -1887,7 +2349,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
if (*rmapp)
printk(KERN_ERR "%s: (%s) shadow page has writable"
" mappings: gfn %lx role %x\n",
- __FUNCTION__, audit_msg, sp->gfn,
+ __func__, audit_msg, sp->gfn,
sp->role.word);
}
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 1fce19ec7a2..e64e9f56a65 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -3,6 +3,12 @@
#include <linux/kvm_host.h>
+#ifdef CONFIG_X86_64
+#define TDP_ROOT_LEVEL PT64_ROOT_LEVEL
+#else
+#define TDP_ROOT_LEVEL PT32E_ROOT_LEVEL
+#endif
+
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ecc0856268c..156fe10288a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -130,7 +130,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
unsigned index, pt_access, pte_access;
gpa_t pte_gpa;
- pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
+ pgprintk("%s: addr %lx\n", __func__, addr);
walk:
walker->level = vcpu->arch.mmu.root_level;
pte = vcpu->arch.cr3;
@@ -155,7 +155,7 @@ walk:
pte_gpa += index * sizeof(pt_element_t);
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
- pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
+ pgprintk("%s: table_gfn[%d] %lx\n", __func__,
walker->level - 1, table_gfn);
kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
@@ -222,7 +222,7 @@ walk:
walker->pt_access = pt_access;
walker->pte_access = pte_access;
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
- __FUNCTION__, (u64)pte, pt_access, pte_access);
+ __func__, (u64)pte, pt_access, pte_access);
return 1;
not_present:
@@ -243,31 +243,30 @@ err:
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
- u64 *spte, const void *pte, int bytes,
- int offset_in_pte)
+ u64 *spte, const void *pte)
{
pt_element_t gpte;
unsigned pte_access;
- struct page *npage;
+ pfn_t pfn;
+ int largepage = vcpu->arch.update_pte.largepage;
gpte = *(const pt_element_t *)pte;
if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
- if (!offset_in_pte && !is_present_pte(gpte))
+ if (!is_present_pte(gpte))
set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
return;
}
- if (bytes < sizeof(pt_element_t))
- return;
- pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+ pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
return;
- npage = vcpu->arch.update_pte.page;
- if (!npage)
+ pfn = vcpu->arch.update_pte.pfn;
+ if (is_error_pfn(pfn))
return;
- get_page(npage);
+ kvm_get_pfn(pfn);
mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
- gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
+ gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
+ pfn, true);
}
/*
@@ -275,8 +274,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
*/
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *walker,
- int user_fault, int write_fault, int *ptwrite,
- struct page *page)
+ int user_fault, int write_fault, int largepage,
+ int *ptwrite, pfn_t pfn)
{
hpa_t shadow_addr;
int level;
@@ -304,11 +303,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_ent = ((u64 *)__va(shadow_addr)) + index;
if (level == PT_PAGE_TABLE_LEVEL)
break;
- if (is_shadow_present_pte(*shadow_ent)) {
+
+ if (largepage && level == PT_DIRECTORY_LEVEL)
+ break;
+
+ if (is_shadow_present_pte(*shadow_ent)
+ && !is_large_pte(*shadow_ent)) {
shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
continue;
}
+ if (is_large_pte(*shadow_ent))
+ rmap_remove(vcpu->kvm, shadow_ent);
+
if (level - 1 == PT_PAGE_TABLE_LEVEL
&& walker->level == PT_DIRECTORY_LEVEL) {
metaphysical = 1;
@@ -329,7 +336,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
walker->pte_gpa[level - 2],
&curr_pte, sizeof(curr_pte));
if (r || curr_pte != walker->ptes[level - 2]) {
- kvm_release_page_clean(page);
+ kvm_release_pfn_clean(pfn);
return NULL;
}
}
@@ -342,7 +349,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
user_fault, write_fault,
walker->ptes[walker->level-1] & PT_DIRTY_MASK,
- ptwrite, walker->gfn, page);
+ ptwrite, largepage, walker->gfn, pfn, false);
return shadow_ent;
}
@@ -371,16 +378,16 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
u64 *shadow_pte;
int write_pt = 0;
int r;
- struct page *page;
+ pfn_t pfn;
+ int largepage = 0;
- pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
+ pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
kvm_mmu_audit(vcpu, "pre page fault");
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
- down_read(&vcpu->kvm->slots_lock);
/*
* Look up the shadow pte for the faulting address.
*/
@@ -391,40 +398,45 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
* The page is not mapped by the guest. Let the guest handle it.
*/
if (!r) {
- pgprintk("%s: guest page fault\n", __FUNCTION__);
+ pgprintk("%s: guest page fault\n", __func__);
inject_page_fault(vcpu, addr, walker.error_code);
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
- up_read(&vcpu->kvm->slots_lock);
return 0;
}
down_read(&current->mm->mmap_sem);
- page = gfn_to_page(vcpu->kvm, walker.gfn);
+ if (walker.level == PT_DIRECTORY_LEVEL) {
+ gfn_t large_gfn;
+ large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
+ if (is_largepage_backed(vcpu, large_gfn)) {
+ walker.gfn = large_gfn;
+ largepage = 1;
+ }
+ }
+ pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
up_read(&current->mm->mmap_sem);
+ /* mmio */
+ if (is_error_pfn(pfn)) {
+ pgprintk("gfn %x is mmio\n", walker.gfn);
+ kvm_release_pfn_clean(pfn);
+ return 1;
+ }
+
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
- &write_pt, page);
- pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
+ largepage, &write_pt, pfn);
+
+ pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
shadow_pte, *shadow_pte, write_pt);
if (!write_pt)
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
- /*
- * mmio: emulate if accessible, otherwise its a guest fault.
- */
- if (shadow_pte && is_io_pte(*shadow_pte)) {
- spin_unlock(&vcpu->kvm->mmu_lock);
- up_read(&vcpu->kvm->slots_lock);
- return 1;
- }
-
++vcpu->stat.pf_fixed;
kvm_mmu_audit(vcpu, "post page fault (fixed)");
spin_unlock(&vcpu->kvm->mmu_lock);
- up_read(&vcpu->kvm->slots_lock);
return write_pt;
}
diff --git a/arch/x86/kvm/segment_descriptor.h b/arch/x86/kvm/segment_descriptor.h
deleted file mode 100644
index 56fc4c87338..00000000000
--- a/arch/x86/kvm/segment_descriptor.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __SEGMENT_DESCRIPTOR_H
-#define __SEGMENT_DESCRIPTOR_H
-
-struct segment_descriptor {
- u16 limit_low;
- u16 base_low;
- u8 base_mid;
- u8 type : 4;
- u8 system : 1;
- u8 dpl : 2;
- u8 present : 1;
- u8 limit_high : 4;
- u8 avl : 1;
- u8 long_mode : 1;
- u8 default_op : 1;
- u8 granularity : 1;
- u8 base_high;
-} __attribute__((packed));
-
-#ifdef CONFIG_X86_64
-/* LDT or TSS descriptor in the GDT. 16 bytes. */
-struct segment_descriptor_64 {
- struct segment_descriptor s;
- u32 base_higher;
- u32 pad_zero;
-};
-
-#endif
-#endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1a582f1090e..89e0be2c10d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -47,6 +47,18 @@ MODULE_LICENSE("GPL");
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
+#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
+
+/* enable NPT for AMD64 and X86 with PAE */
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+static bool npt_enabled = true;
+#else
+static bool npt_enabled = false;
+#endif
+static int npt = 1;
+
+module_param(npt, int, S_IRUGO);
+
static void kvm_reput_irq(struct vcpu_svm *svm);
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
@@ -54,8 +66,7 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_svm, vcpu);
}
-unsigned long iopm_base;
-unsigned long msrpm_base;
+static unsigned long iopm_base;
struct kvm_ldttss_desc {
u16 limit0;
@@ -182,7 +193,7 @@ static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (!(efer & EFER_LMA))
+ if (!npt_enabled && !(efer & EFER_LMA))
efer &= ~EFER_LME;
to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
@@ -219,12 +230,12 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
if (!svm->next_rip) {
- printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
+ printk(KERN_DEBUG "%s: NOP\n", __func__);
return;
}
if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
- __FUNCTION__,
+ __func__,
svm->vmcb->save.rip,
svm->next_rip);
@@ -279,11 +290,7 @@ static void svm_hardware_enable(void *garbage)
struct svm_cpu_data *svm_data;
uint64_t efer;
-#ifdef CONFIG_X86_64
- struct desc_ptr gdt_descr;
-#else
struct desc_ptr gdt_descr;
-#endif
struct desc_struct *gdt;
int me = raw_smp_processor_id();
@@ -302,7 +309,6 @@ static void svm_hardware_enable(void *garbage)
svm_data->asid_generation = 1;
svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
svm_data->next_asid = svm_data->max_asid + 1;
- svm_features = cpuid_edx(SVM_CPUID_FUNC);
asm volatile ("sgdt %0" : "=m"(gdt_descr));
gdt = (struct desc_struct *)gdt_descr.address;
@@ -361,12 +367,51 @@ static void set_msr_interception(u32 *msrpm, unsigned msr,
BUG();
}
+static void svm_vcpu_init_msrpm(u32 *msrpm)
+{
+ memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
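+	/* all bits set: every MSR access is intercepted by default; the
+	 * MSRs listed below are then opened for direct access */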
+
+#ifdef CONFIG_X86_64
+ set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
+ set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
+ set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
+ set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
+ set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
+ set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
+#endif
+ set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
+}
+
+static void svm_enable_lbrv(struct vcpu_svm *svm)
+{
+ u32 *msrpm = svm->msrpm;
+
+ svm->vmcb->control.lbr_ctl = 1;
+ set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+ set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+}
+
+static void svm_disable_lbrv(struct vcpu_svm *svm)
+{
+ u32 *msrpm = svm->msrpm;
+
+ svm->vmcb->control.lbr_ctl = 0;
+ set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
+ set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
+ set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
+ set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
+}
+
static __init int svm_hardware_setup(void)
{
int cpu;
struct page *iopm_pages;
- struct page *msrpm_pages;
- void *iopm_va, *msrpm_va;
+ void *iopm_va;
int r;
iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
@@ -379,41 +424,33 @@ static __init int svm_hardware_setup(void)
clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
+ if (boot_cpu_has(X86_FEATURE_NX))
+ kvm_enable_efer_bits(EFER_NX);
- msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ for_each_online_cpu(cpu) {
+ r = svm_cpu_init(cpu);
+ if (r)
+ goto err;
+ }
- r = -ENOMEM;
- if (!msrpm_pages)
- goto err_1;
+ svm_features = cpuid_edx(SVM_CPUID_FUNC);
- msrpm_va = page_address(msrpm_pages);
- memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
- msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
+ if (!svm_has(SVM_FEATURE_NPT))
+ npt_enabled = false;
-#ifdef CONFIG_X86_64
- set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
- set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
- set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
- set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
- set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
- set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
-#endif
- set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
- set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
- set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
- set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);
+ if (npt_enabled && !npt) {
+ printk(KERN_INFO "kvm: Nested Paging disabled\n");
+ npt_enabled = false;
+ }
- for_each_online_cpu(cpu) {
- r = svm_cpu_init(cpu);
- if (r)
- goto err_2;
+ if (npt_enabled) {
+ printk(KERN_INFO "kvm: Nested Paging enabled\n");
+ kvm_enable_tdp();
}
+
return 0;
-err_2:
- __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
- msrpm_base = 0;
-err_1:
+err:
__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
iopm_base = 0;
return r;
@@ -421,9 +458,8 @@ err_1:
static __exit void svm_hardware_unsetup(void)
{
- __free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
- iopm_base = msrpm_base = 0;
+ iopm_base = 0;
}
static void init_seg(struct vmcb_seg *seg)
@@ -443,15 +479,14 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
seg->base = 0;
}
-static void init_vmcb(struct vmcb *vmcb)
+static void init_vmcb(struct vcpu_svm *svm)
{
- struct vmcb_control_area *control = &vmcb->control;
- struct vmcb_save_area *save = &vmcb->save;
+ struct vmcb_control_area *control = &svm->vmcb->control;
+ struct vmcb_save_area *save = &svm->vmcb->save;
control->intercept_cr_read = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
- INTERCEPT_CR4_MASK |
- INTERCEPT_CR8_MASK;
+ INTERCEPT_CR4_MASK;
control->intercept_cr_write = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
@@ -471,23 +506,13 @@ static void init_vmcb(struct vmcb *vmcb)
INTERCEPT_DR7_MASK;
control->intercept_exceptions = (1 << PF_VECTOR) |
- (1 << UD_VECTOR);
+ (1 << UD_VECTOR) |
+ (1 << MC_VECTOR);
control->intercept = (1ULL << INTERCEPT_INTR) |
(1ULL << INTERCEPT_NMI) |
(1ULL << INTERCEPT_SMI) |
- /*
- * selective cr0 intercept bug?
- * 0: 0f 22 d8 mov %eax,%cr3
- * 3: 0f 20 c0 mov %cr0,%eax
- * 6: 0d 00 00 00 80 or $0x80000000,%eax
- * b: 0f 22 c0 mov %eax,%cr0
- * set cr3 ->interception
- * get cr0 ->interception
- * set cr0 -> no interception
- */
- /* (1ULL << INTERCEPT_SELECTIVE_CR0) | */
(1ULL << INTERCEPT_CPUID) |
(1ULL << INTERCEPT_INVD) |
(1ULL << INTERCEPT_HLT) |
@@ -508,7 +533,7 @@ static void init_vmcb(struct vmcb *vmcb)
(1ULL << INTERCEPT_MWAIT);
control->iopm_base_pa = iopm_base;
- control->msrpm_base_pa = msrpm_base;
+ control->msrpm_base_pa = __pa(svm->msrpm);
control->tsc_offset = 0;
control->int_ctl = V_INTR_MASKING_MASK;
@@ -550,13 +575,30 @@ static void init_vmcb(struct vmcb *vmcb)
save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
save->cr4 = X86_CR4_PAE;
/* rdx = ?? */
+
+ if (npt_enabled) {
+ /* Setup VMCB for Nested Paging */
+ control->nested_ctl = 1;
+ control->intercept &= ~(1ULL << INTERCEPT_TASK_SWITCH);
+ control->intercept_exceptions &= ~(1 << PF_VECTOR);
+ control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
+ INTERCEPT_CR3_MASK);
+ control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
+ INTERCEPT_CR3_MASK);
+ save->g_pat = 0x0007040600070406ULL;
+ /* enable caching because the QEMU BIOS doesn't enable it */
+ save->cr0 = X86_CR0_ET;
+ save->cr3 = 0;
+ save->cr4 = 0;
+ }
+ force_new_asid(&svm->vcpu);
}
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- init_vmcb(svm->vmcb);
+ init_vmcb(svm);
if (vcpu->vcpu_id != 0) {
svm->vmcb->save.rip = 0;
@@ -571,6 +613,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
struct vcpu_svm *svm;
struct page *page;
+ struct page *msrpm_pages;
int err;
svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -589,12 +632,19 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
goto uninit;
}
+ err = -ENOMEM;
+ msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ if (!msrpm_pages)
+ goto uninit;
+ svm->msrpm = page_address(msrpm_pages);
+ svm_vcpu_init_msrpm(svm->msrpm);
+
svm->vmcb = page_address(page);
clear_page(svm->vmcb);
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
svm->asid_generation = 0;
memset(svm->db_regs, 0, sizeof(svm->db_regs));
- init_vmcb(svm->vmcb);
+ init_vmcb(svm);
fx_init(&svm->vcpu);
svm->vcpu.fpu_active = 1;
@@ -617,6 +667,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+ __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
}
@@ -731,6 +782,13 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
var->unusable = !var->present;
}
+static int svm_get_cpl(struct kvm_vcpu *vcpu)
+{
+ struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
+
+ return save->cpl;
+}
+
static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -784,6 +842,9 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
}
#endif
+ if (npt_enabled)
+ goto set;
+
if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
vcpu->fpu_active = 1;
@@ -791,18 +852,29 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vcpu->arch.cr0 = cr0;
cr0 |= X86_CR0_PG | X86_CR0_WP;
- cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
if (!vcpu->fpu_active) {
svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
cr0 |= X86_CR0_TS;
}
+set:
+ /*
+ * re-enable caching here because the QEMU BIOS
+ * does not do it - this results in some delay at
+ * reboot
+ */
+ cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
}
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
- vcpu->arch.cr4 = cr4;
- to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
+ unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
+
+ vcpu->arch.cr4 = cr4;
+ if (!npt_enabled)
+ cr4 |= X86_CR4_PAE;
+ cr4 |= host_cr4_mce;
+ to_svm(vcpu)->vmcb->save.cr4 = cr4;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -833,13 +905,6 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
}
-/* FIXME:
-
- svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
- svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
-
-*/
-
static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
return -EOPNOTSUPP;
@@ -920,7 +985,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
}
default:
printk(KERN_DEBUG "%s: unexpected dr %u\n",
- __FUNCTION__, dr);
+ __func__, dr);
*exception = UD_VECTOR;
return;
}
@@ -962,6 +1027,19 @@ static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
return 1;
}
+static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+ /*
+ * On an #MC intercept the MCE handler is not called automatically in
+ * the host. So do it by hand here.
+ */
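+ /* 0x12 is the #MC vector: this re-raises the machine check in host context */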
+ asm volatile (
+ "int $0x12\n");
+ /* not sure if we ever come back to this point */
+
+ return 1;
+}
+
static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
/*
@@ -969,7 +1047,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
* so reinitialize it.
*/
clear_page(svm->vmcb);
- init_vmcb(svm->vmcb);
+ init_vmcb(svm);
kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
return 0;
@@ -1033,9 +1111,18 @@ static int invalid_op_interception(struct vcpu_svm *svm,
static int task_switch_interception(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
- pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
- return 0;
+ u16 tss_selector;
+
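+ /*
+  * EXITINFO1[15:0] holds the target TSS selector; bits 36 and 38 of
+  * EXITINFO2 flag an IRET or JMP source (see the svm.h hunk below).
+  * Anything else is handled as a CALL-style switch.
+  */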
+ tss_selector = (u16)svm->vmcb->control.exit_info_1;
+ if (svm->vmcb->control.exit_info_2 &
+ (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
+ return kvm_task_switch(&svm->vcpu, tss_selector,
+ TASK_SWITCH_IRET);
+ if (svm->vmcb->control.exit_info_2 &
+ (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
+ return kvm_task_switch(&svm->vcpu, tss_selector,
+ TASK_SWITCH_JMP);
+ return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
}
static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
@@ -1049,7 +1136,7 @@ static int emulate_on_interception(struct vcpu_svm *svm,
struct kvm_run *kvm_run)
{
if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
- pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
+ pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
return 1;
}
@@ -1179,8 +1266,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
svm->vmcb->save.sysenter_esp = data;
break;
case MSR_IA32_DEBUGCTLMSR:
- pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
- __FUNCTION__, data);
+ if (!svm_has(SVM_FEATURE_LBRV)) {
+ pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
+ __func__, data);
+ break;
+ }
+ if (data & DEBUGCTL_RESERVED_BITS)
+ return 1;
+
+ svm->vmcb->save.dbgctl = data;
+ if (data & (1ULL<<0))
+ svm_enable_lbrv(svm);
+ else
+ svm_disable_lbrv(svm);
break;
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
@@ -1265,6 +1363,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
+ [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
[SVM_EXIT_INTR] = nop_on_interception,
[SVM_EXIT_NMI] = nop_on_interception,
[SVM_EXIT_SMI] = nop_on_interception,
@@ -1290,14 +1389,34 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
[SVM_EXIT_WBINVD] = emulate_on_interception,
[SVM_EXIT_MONITOR] = invalid_op_interception,
[SVM_EXIT_MWAIT] = invalid_op_interception,
+ [SVM_EXIT_NPF] = pf_interception,
};
-
static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
u32 exit_code = svm->vmcb->control.exit_code;
+ if (npt_enabled) {
+ int mmu_reload = 0;
+ if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
+ svm_set_cr0(vcpu, svm->vmcb->save.cr0);
+ mmu_reload = 1;
+ }
+ vcpu->arch.cr0 = svm->vmcb->save.cr0;
+ vcpu->arch.cr3 = svm->vmcb->save.cr3;
+ if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
+ if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+ }
+ if (mmu_reload) {
+ kvm_mmu_reset_context(vcpu);
+ kvm_mmu_load(vcpu);
+ }
+ }
+
kvm_reput_irq(svm);
if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
@@ -1308,10 +1427,11 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
}
if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
- exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
+ exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
+ exit_code != SVM_EXIT_NPF)
printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
"exit_code 0x%x\n",
- __FUNCTION__, svm->vmcb->control.exit_int_info,
+ __func__, svm->vmcb->control.exit_int_info,
exit_code);
if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
@@ -1364,6 +1484,27 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
svm_inject_irq(svm, irq);
}
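+/*
+ * CR8 writes are left unintercepted by default; the intercept is only
+ * re-armed when the highest pending interrupt is masked by the current
+ * TPR, so that the guest lowering its TPR far enough to unmask it
+ * traps back to the hypervisor.
+ */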
+static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb *vmcb = svm->vmcb;
+ int max_irr, tpr;
+
+ if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
+ return;
+
+ vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+
+ max_irr = kvm_lapic_find_highest_irr(vcpu);
+ if (max_irr == -1)
+ return;
+
+ tpr = kvm_lapic_get_cr8(vcpu) << 4;
+
+ if (tpr >= (max_irr & 0xf0))
+ vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+}
+
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1376,14 +1517,14 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
SVM_EVTINJ_VEC_MASK;
vmcb->control.exit_int_info = 0;
svm_inject_irq(svm, intr_vector);
- return;
+ goto out;
}
if (vmcb->control.int_ctl & V_IRQ_MASK)
- return;
+ goto out;
if (!kvm_cpu_has_interrupt(vcpu))
- return;
+ goto out;
if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
@@ -1391,12 +1532,14 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
/* unable to deliver irq, set pending irq */
vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
svm_inject_irq(svm, 0x0);
- return;
+ goto out;
}
/* Okay, we can deliver the interrupt: grab it and update PIC state. */
intr_vector = kvm_cpu_get_interrupt(vcpu);
svm_inject_irq(svm, intr_vector);
kvm_timer_intr_post(vcpu, intr_vector);
+out:
+ update_cr8_intercept(vcpu);
}
static void kvm_reput_irq(struct vcpu_svm *svm)
@@ -1482,6 +1625,29 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
+static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
+ int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
+ kvm_lapic_set_tpr(vcpu, cr8);
+ }
+}
+
+static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ u64 cr8;
+
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return;
+
+ cr8 = kvm_get_cr8(vcpu);
+ svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
+ svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
+}
+
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1491,6 +1657,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
pre_svm_run(svm);
+ sync_lapic_to_cr8(vcpu);
+
save_host_msrs(vcpu);
fs_selector = read_fs();
gs_selector = read_gs();
@@ -1499,6 +1667,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
svm->host_dr6 = read_dr6();
svm->host_dr7 = read_dr7();
svm->vmcb->save.cr2 = vcpu->arch.cr2;
+ /* required for live migration with NPT */
+ if (npt_enabled)
+ svm->vmcb->save.cr3 = vcpu->arch.cr3;
if (svm->vmcb->save.dr7 & 0xff) {
write_dr7(0);
@@ -1635,6 +1806,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
stgi();
+ sync_cr8_to_lapic(vcpu);
+
svm->next_rip = 0;
}
@@ -1642,6 +1815,12 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (npt_enabled) {
+ svm->vmcb->control.nested_cr3 = root;
+ force_new_asid(vcpu);
+ return;
+ }
+
svm->vmcb->save.cr3 = root;
force_new_asid(vcpu);
@@ -1709,6 +1888,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.get_segment_base = svm_get_segment_base,
.get_segment = svm_get_segment,
.set_segment = svm_set_segment,
+ .get_cpl = svm_get_cpl,
.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
.set_cr0 = svm_set_cr0,
diff --git a/arch/x86/kvm/svm.h b/arch/x86/kvm/svm.h
index 5fd50491b55..1b8afa78e86 100644
--- a/arch/x86/kvm/svm.h
+++ b/arch/x86/kvm/svm.h
@@ -238,6 +238,9 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
+#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
+#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+
#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR3 0x003
#define SVM_EXIT_READ_CR4 0x004
diff --git a/arch/x86/kvm/tss.h b/arch/x86/kvm/tss.h
new file mode 100644
index 00000000000..622aa10f692
--- /dev/null
+++ b/arch/x86/kvm/tss.h
@@ -0,0 +1,59 @@
+#ifndef __TSS_SEGMENT_H
+#define __TSS_SEGMENT_H
+
+struct tss_segment_32 {
+ u32 prev_task_link;
+ u32 esp0;
+ u32 ss0;
+ u32 esp1;
+ u32 ss1;
+ u32 esp2;
+ u32 ss2;
+ u32 cr3;
+ u32 eip;
+ u32 eflags;
+ u32 eax;
+ u32 ecx;
+ u32 edx;
+ u32 ebx;
+ u32 esp;
+ u32 ebp;
+ u32 esi;
+ u32 edi;
+ u32 es;
+ u32 cs;
+ u32 ss;
+ u32 ds;
+ u32 fs;
+ u32 gs;
+ u32 ldt_selector;
+ u16 t;
+ u16 io_map;
+};
+
+struct tss_segment_16 {
+ u16 prev_task_link;
+ u16 sp0;
+ u16 ss0;
+ u16 sp1;
+ u16 ss1;
+ u16 sp2;
+ u16 ss2;
+ u16 ip;
+ u16 flag;
+ u16 ax;
+ u16 cx;
+ u16 dx;
+ u16 bx;
+ u16 sp;
+ u16 bp;
+ u16 si;
+ u16 di;
+ u16 es;
+ u16 cs;
+ u16 ss;
+ u16 ds;
+ u16 ldt;
+};
+
+#endif
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8e1462880d1..8e5d6645b90 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -17,7 +17,6 @@
#include "irq.h"
#include "vmx.h"
-#include "segment_descriptor.h"
#include "mmu.h"
#include <linux/kvm_host.h>
@@ -37,6 +36,12 @@ MODULE_LICENSE("GPL");
static int bypass_guest_pf = 1;
module_param(bypass_guest_pf, bool, 0);
+static int enable_vpid = 1;
+module_param(enable_vpid, bool, 0);
+
+static int flexpriority_enabled = 1;
+module_param(flexpriority_enabled, bool, 0);
+
struct vmcs {
u32 revision_id;
u32 abort;
@@ -71,6 +76,7 @@ struct vcpu_vmx {
unsigned rip;
} irq;
} rmode;
+ int vpid;
};
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -85,6 +91,10 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;
+static struct page *vmx_msr_bitmap;
+
+static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
+static DEFINE_SPINLOCK(vmx_vpid_lock);
static struct vmcs_config {
int size;
@@ -176,6 +186,11 @@ static inline int is_external_interrupt(u32 intr_info)
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
+static inline int cpu_has_vmx_msr_bitmap(void)
+{
+ return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
+}
+
static inline int cpu_has_vmx_tpr_shadow(void)
{
return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
@@ -194,8 +209,9 @@ static inline int cpu_has_secondary_exec_ctrls(void)
static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
- return (vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+ return flexpriority_enabled
+ && (vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
}
static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
@@ -204,6 +220,12 @@ static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
(irqchip_in_kernel(kvm)));
}
+static inline int cpu_has_vmx_vpid(void)
+{
+ return (vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENABLE_VPID);
+}
+
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@@ -214,6 +236,20 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
return -1;
}
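+/*
+ * INVVPID invalidates TLB entries tagged with the given VPID; "ext"
+ * selects the scope (see the VMX_VPID_EXTENT_* values in vmx.h).  A
+ * VMfail sets CF or ZF, which the "ja 1f; ud2" sequence turns into a
+ * hard stop.
+ */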
+static inline void __invvpid(int ext, u16 vpid, gva_t gva)
+{
+ struct {
+ u64 vpid : 16;
+ u64 rsvd : 48;
+ u64 gva;
+ } operand = { vpid, 0, gva };
+
+ asm volatile (ASM_VMX_INVVPID
+ /* CF==1 or ZF==1 --> rc = -1 */
+ "; ja 1f ; ud2 ; 1:"
+ : : "a"(&operand), "c"(ext) : "cc", "memory");
+}
+
static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@@ -257,6 +293,14 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
vmx->launched = 0;
}
+static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
+{
+ if (vmx->vpid == 0)
+ return;
+
+ __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
+}
+
static unsigned long vmcs_readl(unsigned long field)
{
unsigned long value;
@@ -353,7 +397,7 @@ static void reload_tss(void)
* VT restores TR but not its size. Useless.
*/
struct descriptor_table gdt;
- struct segment_descriptor *descs;
+ struct desc_struct *descs;
get_gdt(&gdt);
descs = (void *)gdt.base;
@@ -485,11 +529,12 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 phys_addr = __pa(vmx->vmcs);
- u64 tsc_this, delta;
+ u64 tsc_this, delta, new_offset;
if (vcpu->cpu != cpu) {
vcpu_clear(vmx);
kvm_migrate_apic_timer(vcpu);
+ vpid_sync_vcpu_all(vmx);
}
if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
@@ -524,8 +569,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* Make sure the time stamp counter is monotonic.
*/
rdtscll(tsc_this);
- delta = vcpu->arch.host_tsc - tsc_this;
- vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
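+ /*
+  * Only compensate when the new CPU's TSC is behind the old one, so
+  * TSC_OFFSET never decreases and the guest-visible TSC stays
+  * monotonic across CPU migration.
+  */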
+ if (tsc_this < vcpu->arch.host_tsc) {
+ delta = vcpu->arch.host_tsc - tsc_this;
+ new_offset = vmcs_read64(TSC_OFFSET) + delta;
+ vmcs_write64(TSC_OFFSET, new_offset);
+ }
}
}
@@ -596,7 +644,7 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
{
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
nr | INTR_TYPE_EXCEPTION
- | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0)
+ | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
| INTR_INFO_VALID_MASK);
if (has_error_code)
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
@@ -959,6 +1007,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING;
opt = CPU_BASED_TPR_SHADOW |
+ CPU_BASED_USE_MSR_BITMAPS |
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
&_cpu_based_exec_control) < 0)
@@ -971,7 +1020,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
min = 0;
opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_WBINVD_EXITING;
+ SECONDARY_EXEC_WBINVD_EXITING |
+ SECONDARY_EXEC_ENABLE_VPID;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
return -EIO;
@@ -1080,6 +1130,10 @@ static __init int hardware_setup(void)
{
if (setup_vmcs_config(&vmcs_config) < 0)
return -EIO;
+
+ if (boot_cpu_has(X86_FEATURE_NX))
+ kvm_enable_efer_bits(EFER_NX);
+
return alloc_kvm_area();
}
@@ -1214,7 +1268,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
- __FUNCTION__);
+ __func__);
vmcs_write32(GUEST_TR_AR_BYTES,
(guest_tr_ar & ~AR_TYPE_MASK)
| AR_TYPE_BUSY_64_TSS);
@@ -1239,6 +1293,11 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
#endif
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ vpid_sync_vcpu_all(to_vmx(vcpu));
+}
+
static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
@@ -1275,6 +1334,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
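+ /*
+  * With VPID enabled, a guest CR3 load no longer flushes the TLB
+  * implicitly, so invalidate this vcpu's tagged entries by hand.
+  */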
+ vmx_flush_tlb(vcpu);
vmcs_writel(GUEST_CR3, cr3);
if (vcpu->arch.cr0 & X86_CR0_PE)
vmx_fpu_deactivate(vcpu);
@@ -1288,14 +1348,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vcpu->arch.cr4 = cr4;
}
-#ifdef CONFIG_X86_64
-
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
vcpu->arch.shadow_efer = efer;
+ if (!msr)
+ return;
if (efer & EFER_LMA) {
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1312,8 +1372,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
setup_msrs(vmx);
}
-#endif
-
static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -1344,6 +1402,20 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
var->unusable = (ar >> 16) & 1;
}
+static int vmx_get_cpl(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment kvm_seg;
+
+ if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
+ return 0;
+
+ if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
+ return 3;
+
+ vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
+ return kvm_seg.selector & 3;
+}
+
static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
u32 ar;
@@ -1433,7 +1505,6 @@ static int init_rmode_tss(struct kvm *kvm)
int ret = 0;
int r;
- down_read(&kvm->slots_lock);
r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
if (r < 0)
goto out;
@@ -1456,7 +1527,6 @@ static int init_rmode_tss(struct kvm *kvm)
ret = 1;
out:
- up_read(&kvm->slots_lock);
return ret;
}
@@ -1494,6 +1564,46 @@ out:
return r;
}
+static void allocate_vpid(struct vcpu_vmx *vmx)
+{
+ int vpid;
+
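+ /* vpid 0 is reserved for the host (see vmx_init below); it is also
+  * the fallback when VPID is unavailable or the bitmap is exhausted */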
+ vmx->vpid = 0;
+ if (!enable_vpid || !cpu_has_vmx_vpid())
+ return;
+ spin_lock(&vmx_vpid_lock);
+ vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
+ if (vpid < VMX_NR_VPIDS) {
+ vmx->vpid = vpid;
+ __set_bit(vpid, vmx_vpid_bitmap);
+ }
+ spin_unlock(&vmx_vpid_lock);
+}
+
+void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
+{
+ void *va;
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return;
+
+ /*
+ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+ * have the write-low and read-high bitmap offsets the wrong way round.
+ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+ */
+ va = kmap(msr_bitmap);
+ if (msr <= 0x1fff) {
+ __clear_bit(msr, va + 0x000); /* read-low */
+ __clear_bit(msr, va + 0x800); /* write-low */
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ __clear_bit(msr, va + 0x400); /* read-high */
+ __clear_bit(msr, va + 0xc00); /* write-high */
+ }
+ kunmap(msr_bitmap);
+}
+
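As a concrete reading of the offsets above: MSR_FS_BASE is 0xc0000100, so it falls in the high range; masking with 0x1fff leaves bit 0x100, which is cleared in the read-high quadrant at byte 0x400 and the write-high quadrant at byte 0xc00. A standalone sketch of the quadrant selection (illustrative, not part of the patch):

	/* Sketch: which bitmap quadrant controls a given MSR access. */
	static const char *msr_bitmap_quadrant(u32 msr, int write)
	{
		if (msr <= 0x1fff)
			return write ? "write-low (0x800)" : "read-low (0x000)";
		if (msr >= 0xc0000000 && msr <= 0xc0001fff)
			return write ? "write-high (0xc00)" : "read-high (0x400)";
		return "uncontrolled";	/* such accesses always exit */
	}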
/*
* Sets up the vmcs for emulated real mode.
*/
@@ -1511,6 +1621,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
+ if (cpu_has_vmx_msr_bitmap())
+ vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap));
+
vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
/* Control */
@@ -1532,6 +1645,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
exec_control &=
~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ if (vmx->vpid == 0)
+ exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
@@ -1613,6 +1728,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
u64 msr;
int ret;
+ down_read(&vcpu->kvm->slots_lock);
if (!init_rmode_tss(vmx->vcpu.kvm)) {
ret = -ENOMEM;
goto out;
@@ -1621,7 +1737,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.rmode.active = 0;
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
- set_cr8(&vmx->vcpu, 0);
+ kvm_set_cr8(&vmx->vcpu, 0);
msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
if (vmx->vcpu.vcpu_id == 0)
msr |= MSR_IA32_APICBASE_BSP;
@@ -1704,18 +1820,22 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmcs_write64(APIC_ACCESS_ADDR,
page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
+ if (vmx->vpid != 0)
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+
vmx->vcpu.arch.cr0 = 0x60000010;
vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
vmx_set_cr4(&vmx->vcpu, 0);
-#ifdef CONFIG_X86_64
vmx_set_efer(&vmx->vcpu, 0);
-#endif
vmx_fpu_activate(&vmx->vcpu);
update_exception_bitmap(&vmx->vcpu);
- return 0;
+ vpid_sync_vcpu_all(vmx);
+
+ ret = 0;
out:
+ up_read(&vcpu->kvm->slots_lock);
return ret;
}
@@ -1723,6 +1843,8 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
+
if (vcpu->arch.rmode.active) {
vmx->rmode.irq.pending = true;
vmx->rmode.irq.vector = irq;
@@ -1844,7 +1966,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if ((vect_info & VECTORING_INFO_VALID_MASK) &&
!is_page_fault(intr_info))
printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
- "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
+ "intr info 0x%x\n", __func__, vect_info, intr_info);
if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
@@ -1869,10 +1991,12 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
error_code = 0;
rip = vmcs_readl(GUEST_RIP);
- if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
+ if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
if (is_page_fault(intr_info)) {
cr2 = vmcs_readl(EXIT_QUALIFICATION);
+ KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
+ (u32)((u64)cr2 >> 32), handler);
return kvm_mmu_page_fault(vcpu, cr2, error_code);
}
@@ -1901,6 +2025,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
++vcpu->stat.irq_exits;
+ KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
return 1;
}
@@ -1958,25 +2083,27 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
+ KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg],
+ (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
switch (cr) {
case 0:
vcpu_load_rsp_rip(vcpu);
- set_cr0(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
case 3:
vcpu_load_rsp_rip(vcpu);
- set_cr3(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
case 4:
vcpu_load_rsp_rip(vcpu);
- set_cr4(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
case 8:
vcpu_load_rsp_rip(vcpu);
- set_cr8(vcpu, vcpu->arch.regs[reg]);
+ kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
return 1;
@@ -1990,6 +2117,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->arch.cr0 &= ~X86_CR0_TS;
vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
vmx_fpu_activate(vcpu);
+ KVMTRACE_0D(CLTS, vcpu, handler);
skip_emulated_instruction(vcpu);
return 1;
case 1: /*mov from cr*/
@@ -1998,18 +2126,24 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu_load_rsp_rip(vcpu);
vcpu->arch.regs[reg] = vcpu->arch.cr3;
vcpu_put_rsp_rip(vcpu);
+ KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
+ (u32)vcpu->arch.regs[reg],
+ (u32)((u64)vcpu->arch.regs[reg] >> 32),
+ handler);
skip_emulated_instruction(vcpu);
return 1;
case 8:
vcpu_load_rsp_rip(vcpu);
- vcpu->arch.regs[reg] = get_cr8(vcpu);
+ vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
vcpu_put_rsp_rip(vcpu);
+ KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
+ (u32)vcpu->arch.regs[reg], handler);
skip_emulated_instruction(vcpu);
return 1;
}
break;
case 3: /* lmsw */
- lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+ kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
skip_emulated_instruction(vcpu);
return 1;
@@ -2049,6 +2183,7 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
val = 0;
}
vcpu->arch.regs[reg] = val;
+ KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
} else {
/* mov to dr */
}
@@ -2073,6 +2208,9 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
+ KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32),
+ handler);
+
/* FIXME: handling of bits 32:63 of rax, rdx */
vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
@@ -2086,6 +2224,9 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+ KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32),
+ handler);
+
if (vmx_set_msr(vcpu, ecx, data) != 0) {
kvm_inject_gp(vcpu, 0);
return 1;
@@ -2110,6 +2251,9 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+
+ KVMTRACE_0D(PEND_INTR, vcpu, handler);
+
/*
* If the user space waits to inject interrupts, exit as soon as
* possible
@@ -2152,6 +2296,8 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
offset = exit_qualification & 0xffful;
+ KVMTRACE_1D(APIC_ACCESS, vcpu, (u32)offset, handler);
+
er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
if (er != EMULATE_DONE) {
@@ -2163,6 +2309,20 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
+static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ unsigned long exit_qualification;
+ u16 tss_selector;
+ int reason;
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ reason = (u32)exit_qualification >> 30;
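+ /*
+  * Per the SDM, qualification bits 15:0 hold the TSS selector and
+  * bits 31:30 the switch source (call, iret, jmp or IDT task gate).
+  */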
+ tss_selector = exit_qualification;
+
+ return kvm_task_switch(vcpu, tss_selector, reason);
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -2185,6 +2345,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_WBINVD] = handle_wbinvd,
+ [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
};
static const int kvm_vmx_max_exit_handlers =
@@ -2200,6 +2361,9 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 vectoring_info = vmx->idt_vectoring_info;
+ KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
+ (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
+
if (unlikely(vmx->fail)) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
@@ -2210,7 +2374,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
exit_reason != EXIT_REASON_EXCEPTION_NMI)
printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
- "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
+ "exit reason is 0x%x\n", __func__, exit_reason);
if (exit_reason < kvm_vmx_max_exit_handlers
&& kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
@@ -2221,10 +2385,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
return 0;
}
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
-}
-
static void update_tpr_threshold(struct kvm_vcpu *vcpu)
{
int max_irr, tpr;
@@ -2285,11 +2445,13 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
return;
}
+ KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler);
+
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
- if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
+ if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK))
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
vmcs_read32(IDT_VECTORING_ERROR_CODE));
if (unlikely(has_ext_irq))
@@ -2470,8 +2632,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
/* We need to handle NMIs before interrupts are enabled */
- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+ if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
+ KVMTRACE_0D(NMI, vcpu, handler);
asm("int $2");
+ }
}
static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
@@ -2489,6 +2653,10 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ spin_lock(&vmx_vpid_lock);
+ if (vmx->vpid != 0)
+ __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+ spin_unlock(&vmx_vpid_lock);
vmx_free_vmcs(vcpu);
kfree(vmx->host_msrs);
kfree(vmx->guest_msrs);
@@ -2505,6 +2673,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (!vmx)
return ERR_PTR(-ENOMEM);
+ allocate_vpid(vmx);
+
err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
if (err)
goto free_vcpu;
@@ -2591,14 +2761,13 @@ static struct kvm_x86_ops vmx_x86_ops = {
.get_segment_base = vmx_get_segment_base,
.get_segment = vmx_get_segment,
.set_segment = vmx_set_segment,
+ .get_cpl = vmx_get_cpl,
.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
.set_cr0 = vmx_set_cr0,
.set_cr3 = vmx_set_cr3,
.set_cr4 = vmx_set_cr4,
-#ifdef CONFIG_X86_64
.set_efer = vmx_set_efer,
-#endif
.get_idt = vmx_get_idt,
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
@@ -2626,7 +2795,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
static int __init vmx_init(void)
{
- void *iova;
+ void *va;
int r;
vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
@@ -2639,28 +2808,48 @@ static int __init vmx_init(void)
goto out;
}
+ vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!vmx_msr_bitmap) {
+ r = -ENOMEM;
+ goto out1;
+ }
+
/*
* Allow direct access to the PC debug port (it is often used for I/O
* delays, but the vmexits simply slow things down).
*/
- iova = kmap(vmx_io_bitmap_a);
- memset(iova, 0xff, PAGE_SIZE);
- clear_bit(0x80, iova);
+ va = kmap(vmx_io_bitmap_a);
+ memset(va, 0xff, PAGE_SIZE);
+ clear_bit(0x80, va);
kunmap(vmx_io_bitmap_a);
- iova = kmap(vmx_io_bitmap_b);
- memset(iova, 0xff, PAGE_SIZE);
+ va = kmap(vmx_io_bitmap_b);
+ memset(va, 0xff, PAGE_SIZE);
kunmap(vmx_io_bitmap_b);
+ va = kmap(vmx_msr_bitmap);
+ memset(va, 0xff, PAGE_SIZE);
+ kunmap(vmx_msr_bitmap);
+
+ set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
if (r)
- goto out1;
+ goto out2;
+
+ vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
+ vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
+ vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
+ vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
+ vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
if (bypass_guest_pf)
kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
return 0;
+out2:
+ __free_page(vmx_msr_bitmap);
out1:
__free_page(vmx_io_bitmap_b);
out:
@@ -2670,6 +2859,7 @@ out:
static void __exit vmx_exit(void)
{
+ __free_page(vmx_msr_bitmap);
__free_page(vmx_io_bitmap_b);
__free_page(vmx_io_bitmap_a);
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index d52ae8d7303..5dff4606b98 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -49,6 +49,7 @@
* Definitions of Secondary Processor-Based VM-Execution Controls.
*/
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
+#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
@@ -65,6 +66,7 @@
/* VMCS Encodings */
enum vmcs_field {
+ VIRTUAL_PROCESSOR_ID = 0x00000000,
GUEST_ES_SELECTOR = 0x00000800,
GUEST_CS_SELECTOR = 0x00000802,
GUEST_SS_SELECTOR = 0x00000804,
@@ -231,12 +233,12 @@ enum vmcs_field {
*/
#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */
#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */
-#define INTR_INFO_DELIEVER_CODE_MASK 0x800 /* 11 */
+#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */
#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */
#define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK
#define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK
-#define VECTORING_INFO_DELIEVER_CODE_MASK INTR_INFO_DELIEVER_CODE_MASK
+#define VECTORING_INFO_DELIVER_CODE_MASK INTR_INFO_DELIVER_CODE_MASK
#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK
#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
@@ -321,4 +323,8 @@ enum vmcs_field {
#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT 9
+#define VMX_NR_VPIDS (1 << 16)
+#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
+#define VMX_VPID_EXTENT_ALL_CONTEXT 2
+
#endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6b01552bd1f..0ce556372a4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -15,10 +15,12 @@
*/
#include <linux/kvm_host.h>
-#include "segment_descriptor.h"
#include "irq.h"
#include "mmu.h"
+#include "i8254.h"
+#include "tss.h"
+#include <linux/clocksource.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
@@ -28,6 +30,7 @@
#include <asm/uaccess.h>
#include <asm/msr.h>
+#include <asm/desc.h>
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
@@ -41,7 +44,15 @@
| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
-#define EFER_RESERVED_BITS 0xfffffffffffff2fe
+/* EFER defaults:
+ * - enable syscall by default because it's emulated by KVM
+ * - enable LME and LMA by default on 64-bit KVM
+ */
+#ifdef CONFIG_X86_64
+static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
+#else
+static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
+#endif
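(For reference: the complement of 0xfffffffffffffafe is 0x501, leaving SCE (bit 0), LME (bit 8) and LMA (bit 10) writable on 64-bit hosts, while the 32-bit mask leaves only SCE writable. NX (bit 11) is whitelisted separately via kvm_enable_efer_bits(EFER_NX) in the svm.c and vmx.c hunks above, when the host CPU supports it.)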
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -63,6 +74,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "irq_window", VCPU_STAT(irq_window_exits) },
{ "halt_exits", VCPU_STAT(halt_exits) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+ { "hypercalls", VCPU_STAT(hypercalls) },
{ "request_irq", VCPU_STAT(request_irq_exits) },
{ "irq_exits", VCPU_STAT(irq_exits) },
{ "host_state_reload", VCPU_STAT(host_state_reload) },
@@ -78,6 +90,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "mmu_recycled", VM_STAT(mmu_recycled) },
{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
+ { "largepages", VM_STAT(lpages) },
{ NULL }
};
@@ -85,7 +98,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
unsigned long segment_base(u16 selector)
{
struct descriptor_table gdt;
- struct segment_descriptor *d;
+ struct desc_struct *d;
unsigned long table_base;
unsigned long v;
@@ -101,13 +114,12 @@ unsigned long segment_base(u16 selector)
asm("sldt %0" : "=g"(ldt_selector));
table_base = segment_base(ldt_selector);
}
- d = (struct segment_descriptor *)(table_base + (selector & ~7));
- v = d->base_low | ((unsigned long)d->base_mid << 16) |
- ((unsigned long)d->base_high << 24);
+ d = (struct desc_struct *)(table_base + (selector & ~7));
+ v = d->base0 | ((unsigned long)d->base1 << 16) |
+ ((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
- if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
- v |= ((unsigned long) \
- ((struct segment_descriptor_64 *)d)->base_higher) << 32;
+ if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+ v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
return v;
}
@@ -145,11 +157,16 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
u32 error_code)
{
++vcpu->stat.pf_guest;
- if (vcpu->arch.exception.pending && vcpu->arch.exception.nr == PF_VECTOR) {
- printk(KERN_DEBUG "kvm: inject_page_fault:"
- " double fault 0x%lx\n", addr);
- vcpu->arch.exception.nr = DF_VECTOR;
- vcpu->arch.exception.error_code = 0;
+ if (vcpu->arch.exception.pending) {
+ if (vcpu->arch.exception.nr == PF_VECTOR) {
+ printk(KERN_DEBUG "kvm: inject_page_fault:"
+ " double fault 0x%lx\n", addr);
+ vcpu->arch.exception.nr = DF_VECTOR;
+ vcpu->arch.exception.error_code = 0;
+ } else if (vcpu->arch.exception.nr == DF_VECTOR) {
+ /* triple fault -> shutdown */
+ set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+ }
return;
}
vcpu->arch.cr2 = addr;
@@ -184,7 +201,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
int ret;
u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
- down_read(&vcpu->kvm->slots_lock);
ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
offset * sizeof(u64), sizeof(pdpte));
if (ret < 0) {
@@ -201,10 +217,10 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
- up_read(&vcpu->kvm->slots_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(load_pdptrs);
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
@@ -215,18 +231,16 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
if (is_long_mode(vcpu) || !is_pae(vcpu))
return false;
- down_read(&vcpu->kvm->slots_lock);
r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
if (r < 0)
goto out;
changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
- up_read(&vcpu->kvm->slots_lock);
return changed;
}
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
if (cr0 & CR0_RESERVED_BITS) {
printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
@@ -284,15 +298,18 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
kvm_mmu_reset_context(vcpu);
return;
}
-EXPORT_SYMBOL_GPL(set_cr0);
+EXPORT_SYMBOL_GPL(kvm_set_cr0);
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
- set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+ kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+ KVMTRACE_1D(LMSW, vcpu,
+ (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
+ handler);
}
-EXPORT_SYMBOL_GPL(lmsw);
+EXPORT_SYMBOL_GPL(kvm_lmsw);
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
if (cr4 & CR4_RESERVED_BITS) {
printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
@@ -323,9 +340,9 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vcpu->arch.cr4 = cr4;
kvm_mmu_reset_context(vcpu);
}
-EXPORT_SYMBOL_GPL(set_cr4);
+EXPORT_SYMBOL_GPL(kvm_set_cr4);
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
kvm_mmu_flush_tlb(vcpu);
@@ -359,7 +376,6 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
*/
}
- down_read(&vcpu->kvm->slots_lock);
/*
* Does the new cr3 value map to physical memory? (Note, we
* catch an invalid cr3 even in real-mode, because it would
@@ -375,11 +391,10 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
vcpu->arch.cr3 = cr3;
vcpu->arch.mmu.new_cr3(vcpu);
}
- up_read(&vcpu->kvm->slots_lock);
}
-EXPORT_SYMBOL_GPL(set_cr3);
+EXPORT_SYMBOL_GPL(kvm_set_cr3);
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
if (cr8 & CR8_RESERVED_BITS) {
printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
@@ -391,16 +406,16 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
else
vcpu->arch.cr8 = cr8;
}
-EXPORT_SYMBOL_GPL(set_cr8);
+EXPORT_SYMBOL_GPL(kvm_set_cr8);
-unsigned long get_cr8(struct kvm_vcpu *vcpu)
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
if (irqchip_in_kernel(vcpu->kvm))
return kvm_lapic_get_cr8(vcpu);
else
return vcpu->arch.cr8;
}
-EXPORT_SYMBOL_GPL(get_cr8);
+EXPORT_SYMBOL_GPL(kvm_get_cr8);
/*
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
@@ -415,7 +430,8 @@ static u32 msrs_to_save[] = {
#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
- MSR_IA32_TIME_STAMP_COUNTER,
+ MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+ MSR_IA32_PERF_STATUS,
};
static unsigned num_msrs_to_save;
@@ -424,11 +440,9 @@ static u32 emulated_msrs[] = {
MSR_IA32_MISC_ENABLE,
};
-#ifdef CONFIG_X86_64
-
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (efer & EFER_RESERVED_BITS) {
+ if (efer & efer_reserved_bits) {
printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
efer);
kvm_inject_gp(vcpu, 0);
@@ -450,7 +464,12 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
vcpu->arch.shadow_efer = efer;
}
-#endif
+void kvm_enable_efer_bits(u64 mask)
+{
+ efer_reserved_bits &= ~mask;
+}
+EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+
/*
* Writes the msr value into the appropriate "register".
@@ -470,26 +489,86 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
return kvm_set_msr(vcpu, index, *data);
}
+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
+{
+ static int version;
+ struct kvm_wall_clock wc;
+ struct timespec wc_ts;
+
+ if (!wall_clock)
+ return;
+
+ version++;
+
+ kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+
+ wc_ts = current_kernel_time();
+ wc.wc_sec = wc_ts.tv_sec;
+ wc.wc_nsec = wc_ts.tv_nsec;
+ wc.wc_version = version;
+
+ kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
+
+ version++;
+ kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+}
+
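The version field implements a seqlock-style protocol: it is odd while the wall-clock payload is being rewritten and even once the update is complete. A guest-side reader would pair with it roughly like this (a sketch; "wc" is assumed to point at the guest mapping of the structure, and the barrier choice is an assumption, not taken from this patch):

	u32 version;
	u64 sec, nsec;

	do {
		version = wc->wc_version;
		rmb();			/* read the version before the payload */
		sec  = wc->wc_sec;
		nsec = wc->wc_nsec;
		rmb();			/* and the payload before the re-check */
	} while ((version & 1) || version != wc->wc_version);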
+static void kvm_write_guest_time(struct kvm_vcpu *v)
+{
+ struct timespec ts;
+ unsigned long flags;
+ struct kvm_vcpu_arch *vcpu = &v->arch;
+ void *shared_kaddr;
+
+ if (!vcpu->time_page)
+ return;
+
+ /* Keep irq disabled to prevent changes to the clock */
+ local_irq_save(flags);
+ kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
+ &vcpu->hv_clock.tsc_timestamp);
+ ktime_get_ts(&ts);
+ local_irq_restore(flags);
+
+ /* With all the info we got, fill in the values */
+
+ vcpu->hv_clock.system_time = ts.tv_nsec +
+ (NSEC_PER_SEC * (u64)ts.tv_sec);
+ /*
+ * The interface expects us to write an even number signaling that the
+ * update is finished. Since the guest won't see the intermediate
+ * state, we just write "2" at the end
+ */
+ vcpu->hv_clock.version = 2;
+
+ shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+
+ memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
+
+ kunmap_atomic(shared_kaddr, KM_USER0);
+
+ mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+}
+
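Given tsc_shift = 22 and tsc_to_system_mul = clocksource_khz2mult(tsc_khz, 22) (both set in the MSR_KVM_SYSTEM_TIME handler below), the guest can reconstruct nanoseconds since boot from a TSC read with fixed-point arithmetic. A sketch (field names per hv_clock above; the rdtsc() helper is assumed, and a real reader must also handle 64-bit overflow of the intermediate product):

	u64 tsc_delta = rdtsc() - hv_clock->tsc_timestamp;
	u64 now_ns = hv_clock->system_time +
		     ((tsc_delta * hv_clock->tsc_to_system_mul)
		      >> hv_clock->tsc_shift);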
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
-#ifdef CONFIG_X86_64
case MSR_EFER:
set_efer(vcpu, data);
break;
-#endif
case MSR_IA32_MC0_STATUS:
pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
- __FUNCTION__, data);
+ __func__, data);
break;
case MSR_IA32_MCG_STATUS:
pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
- __FUNCTION__, data);
+ __func__, data);
break;
case MSR_IA32_MCG_CTL:
pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
- __FUNCTION__, data);
+ __func__, data);
break;
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
@@ -501,6 +580,42 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
+ case MSR_KVM_WALL_CLOCK:
+ vcpu->kvm->arch.wall_clock = data;
+ kvm_write_wall_clock(vcpu->kvm, data);
+ break;
+ case MSR_KVM_SYSTEM_TIME: {
+ if (vcpu->arch.time_page) {
+ kvm_release_page_dirty(vcpu->arch.time_page);
+ vcpu->arch.time_page = NULL;
+ }
+
+ vcpu->arch.time = data;
+
+ /* we verify if the enable bit is set... */
+ if (!(data & 1))
+ break;
+
+ /* ...but clean it before doing the actual write */
+ vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+
+ vcpu->arch.hv_clock.tsc_to_system_mul =
+ clocksource_khz2mult(tsc_khz, 22);
+ vcpu->arch.hv_clock.tsc_shift = 22;
+
+ down_read(&current->mm->mmap_sem);
+ vcpu->arch.time_page =
+ gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+ up_read(&current->mm->mmap_sem);
+
+ if (is_error_page(vcpu->arch.time_page)) {
+ kvm_release_page_clean(vcpu->arch.time_page);
+ vcpu->arch.time_page = NULL;
+ }
+
+ kvm_write_guest_time(vcpu);
+ break;
+ }
default:
pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
return 1;
@@ -540,7 +655,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MC0_MISC+12:
case MSR_IA32_MC0_MISC+16:
case MSR_IA32_UCODE_REV:
- case MSR_IA32_PERF_STATUS:
case MSR_IA32_EBL_CR_POWERON:
/* MTRR registers */
case 0xfe:
@@ -556,11 +670,21 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MISC_ENABLE:
data = vcpu->arch.ia32_misc_enable_msr;
break;
-#ifdef CONFIG_X86_64
+ case MSR_IA32_PERF_STATUS:
+ /* TSC increment by tick */
+ data = 1000ULL;
+ /* CPU multiplier */
+ data |= (((uint64_t)4ULL) << 40);
+ break;
case MSR_EFER:
data = vcpu->arch.shadow_efer;
break;
-#endif
+ case MSR_KVM_WALL_CLOCK:
+ data = vcpu->kvm->arch.wall_clock;
+ break;
+ case MSR_KVM_SYSTEM_TIME:
+ data = vcpu->arch.time;
+ break;
default:
pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
return 1;
@@ -584,9 +708,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
vcpu_load(vcpu);
+ down_read(&vcpu->kvm->slots_lock);
for (i = 0; i < msrs->nmsrs; ++i)
if (do_msr(vcpu, entries[i].index, &entries[i].data))
break;
+ up_read(&vcpu->kvm->slots_lock);
vcpu_put(vcpu);
@@ -688,11 +814,24 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_USER_MEMORY:
case KVM_CAP_SET_TSS_ADDR:
case KVM_CAP_EXT_CPUID:
+ case KVM_CAP_CLOCKSOURCE:
+ case KVM_CAP_PIT:
+ case KVM_CAP_NOP_IO_DELAY:
+ case KVM_CAP_MP_STATE:
r = 1;
break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
break;
+ case KVM_CAP_NR_VCPUS:
+ r = KVM_MAX_VCPUS;
+ break;
+ case KVM_CAP_NR_MEMSLOTS:
+ r = KVM_MEMORY_SLOTS;
+ break;
+ case KVM_CAP_PV_MMU:
+ r = !tdp_enabled;
+ break;
default:
r = 0;
break;
@@ -763,6 +902,7 @@ out:
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
kvm_x86_ops->vcpu_load(vcpu, cpu);
+ kvm_write_guest_time(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -958,32 +1098,32 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
/* function 4 and 0xb have additional index. */
case 4: {
- int index, cache_type;
+ int i, cache_type;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
/* read more entries until cache_type is zero */
- for (index = 1; *nent < maxnent; ++index) {
- cache_type = entry[index - 1].eax & 0x1f;
+ for (i = 1; *nent < maxnent; ++i) {
+ cache_type = entry[i - 1].eax & 0x1f;
if (!cache_type)
break;
- do_cpuid_1_ent(&entry[index], function, index);
- entry[index].flags |=
+ do_cpuid_1_ent(&entry[i], function, i);
+ entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
}
break;
}
case 0xb: {
- int index, level_type;
+ int i, level_type;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
/* read more entries until level_type is zero */
- for (index = 1; *nent < maxnent; ++index) {
- level_type = entry[index - 1].ecx & 0xff;
+ for (i = 1; *nent < maxnent; ++i) {
+ level_type = entry[i - 1].ecx & 0xff;
if (!level_type)
break;
- do_cpuid_1_ent(&entry[index], function, index);
- entry[index].flags |=
+ do_cpuid_1_ent(&entry[i], function, i);
+ entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
}
@@ -1365,6 +1505,23 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
return r;
}
+static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
+{
+ int r = 0;
+
+ memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
+ return r;
+}
+
+static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
+{
+ int r = 0;
+
+ memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
+ kvm_pit_load_count(kvm, 0, ps->channels[0].count);
+ return r;
+}
+
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
@@ -1457,6 +1614,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
} else
goto out;
break;
+ case KVM_CREATE_PIT:
+ r = -ENOMEM;
+ kvm->arch.vpit = kvm_create_pit(kvm);
+ if (kvm->arch.vpit)
+ r = 0;
+ break;
case KVM_IRQ_LINE: {
struct kvm_irq_level irq_event;
@@ -1512,6 +1675,37 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = 0;
break;
}
+ case KVM_GET_PIT: {
+ struct kvm_pit_state ps;
+ r = -EFAULT;
+ if (copy_from_user(&ps, argp, sizeof ps))
+ goto out;
+ r = -ENXIO;
+ if (!kvm->arch.vpit)
+ goto out;
+ r = kvm_vm_ioctl_get_pit(kvm, &ps);
+ if (r)
+ goto out;
+ r = -EFAULT;
+ if (copy_to_user(argp, &ps, sizeof ps))
+ goto out;
+ r = 0;
+ break;
+ }
+ case KVM_SET_PIT: {
+ struct kvm_pit_state ps;
+ r = -EFAULT;
+ if (copy_from_user(&ps, argp, sizeof ps))
+ goto out;
+ r = -ENXIO;
+ if (!kvm->arch.vpit)
+ goto out;
+ r = kvm_vm_ioctl_set_pit(kvm, &ps);
+ if (r)
+ goto out;
+ r = 0;
+ break;
+ }
default:
;
}
@@ -1570,7 +1764,6 @@ int emulator_read_std(unsigned long addr,
void *data = val;
int r = X86EMUL_CONTINUE;
- down_read(&vcpu->kvm->slots_lock);
while (bytes) {
gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
unsigned offset = addr & (PAGE_SIZE-1);
@@ -1592,7 +1785,6 @@ int emulator_read_std(unsigned long addr,
addr += tocopy;
}
out:
- up_read(&vcpu->kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(emulator_read_std);
@@ -1611,9 +1803,7 @@ static int emulator_read_emulated(unsigned long addr,
return X86EMUL_CONTINUE;
}
- down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
- up_read(&vcpu->kvm->slots_lock);
/* For APIC access vmexit */
if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1646,19 +1836,15 @@ mmio:
return X86EMUL_UNHANDLEABLE;
}
-static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
- const void *val, int bytes)
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const void *val, int bytes)
{
int ret;
- down_read(&vcpu->kvm->slots_lock);
ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
- if (ret < 0) {
- up_read(&vcpu->kvm->slots_lock);
+ if (ret < 0)
return 0;
- }
kvm_mmu_pte_write(vcpu, gpa, val, bytes);
- up_read(&vcpu->kvm->slots_lock);
return 1;
}
@@ -1670,9 +1856,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
struct kvm_io_device *mmio_dev;
gpa_t gpa;
- down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
- up_read(&vcpu->kvm->slots_lock);
if (gpa == UNMAPPED_GVA) {
kvm_inject_page_fault(vcpu, addr, 2);
@@ -1749,7 +1933,6 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
char *kaddr;
u64 val;
- down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
if (gpa == UNMAPPED_GVA ||
@@ -1769,9 +1952,8 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
kunmap_atomic(kaddr, KM_USER0);
kvm_release_page_dirty(page);
- emul_write:
- up_read(&vcpu->kvm->slots_lock);
}
+emul_write:
#endif
return emulator_write_emulated(addr, new, bytes, vcpu);
@@ -1802,7 +1984,7 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
*dest = kvm_x86_ops->get_dr(vcpu, dr);
return X86EMUL_CONTINUE;
default:
- pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
+ pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
return X86EMUL_UNHANDLEABLE;
}
}
@@ -1840,7 +2022,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
-struct x86_emulate_ops emulate_ops = {
+static struct x86_emulate_ops emulate_ops = {
.read_std = emulator_read_std,
.read_emulated = emulator_read_emulated,
.write_emulated = emulator_write_emulated,
@@ -2091,6 +2273,13 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
vcpu->arch.pio.guest_page_offset = 0;
vcpu->arch.pio.rep = 0;
+ if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
+ KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
+ handler);
+ else
+ KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
+ handler);
+
kvm_x86_ops->cache_regs(vcpu);
memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
kvm_x86_ops->decache_regs(vcpu);
@@ -2129,6 +2318,13 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
vcpu->arch.pio.guest_page_offset = offset_in_page(address);
vcpu->arch.pio.rep = rep;
+ if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
+ KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
+ handler);
+ else
+ KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
+ handler);
+
if (!count) {
kvm_x86_ops->skip_emulated_instruction(vcpu);
return 1;
@@ -2163,10 +2359,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
kvm_x86_ops->skip_emulated_instruction(vcpu);
for (i = 0; i < nr_pages; ++i) {
- down_read(&vcpu->kvm->slots_lock);
page = gva_to_page(vcpu, address + i * PAGE_SIZE);
vcpu->arch.pio.guest_pages[i] = page;
- up_read(&vcpu->kvm->slots_lock);
if (!page) {
kvm_inject_gp(vcpu, 0);
free_pio_guest_pages(vcpu);
@@ -2238,10 +2432,13 @@ void kvm_arch_exit(void)
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
+ KVMTRACE_0D(HLT, vcpu, handler);
if (irqchip_in_kernel(vcpu->kvm)) {
- vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
+ vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+ up_read(&vcpu->kvm->slots_lock);
kvm_vcpu_block(vcpu);
- if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
+ down_read(&vcpu->kvm->slots_lock);
+ if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
return 1;
} else {
@@ -2251,9 +2448,19 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
+ unsigned long a1)
+{
+ if (is_long_mode(vcpu))
+ return a0;
+ else
+ return a0 | ((gpa_t)a1 << 32);
+}
+
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
+ int r = 1;
kvm_x86_ops->cache_regs(vcpu);
@@ -2263,6 +2470,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
a2 = vcpu->arch.regs[VCPU_REGS_RDX];
a3 = vcpu->arch.regs[VCPU_REGS_RSI];
+ KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
+
if (!is_long_mode(vcpu)) {
nr &= 0xFFFFFFFF;
a0 &= 0xFFFFFFFF;
@@ -2275,13 +2484,17 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
case KVM_HC_VAPIC_POLL_IRQ:
ret = 0;
break;
+ case KVM_HC_MMU_OP:
+ r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
+ break;
default:
ret = -KVM_ENOSYS;
break;
}
vcpu->arch.regs[VCPU_REGS_RAX] = ret;
kvm_x86_ops->decache_regs(vcpu);
- return 0;
+ ++vcpu->stat.hypercalls;
+ return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
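
Outside long mode a guest register holds only 32 bits, so a 64-bit guest physical address for KVM_HC_MMU_OP arrives split across two argument registers; hc_gpa() recombines them. The packing in isolation, with arbitrary sample values:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t gpa_t;

    /* Same recombination hc_gpa() performs for a 32-bit guest. */
    static gpa_t combine_hc_args(uint32_t lo, uint32_t hi)
    {
            return (gpa_t)lo | ((gpa_t)hi << 32);
    }

    int main(void)
    {
            /* the guest split 0x123456789abcdef0 across two registers */
            assert(combine_hc_args(0x9abcdef0u, 0x12345678u) == 0x123456789abcdef0ULL);
            return 0;
    }
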
@@ -2329,7 +2542,7 @@ void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
unsigned long *rflags)
{
- lmsw(vcpu, msw);
+ kvm_lmsw(vcpu, msw);
*rflags = kvm_x86_ops->get_rflags(vcpu);
}
@@ -2346,9 +2559,9 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
case 4:
return vcpu->arch.cr4;
case 8:
- return get_cr8(vcpu);
+ return kvm_get_cr8(vcpu);
default:
- vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+ vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
return 0;
}
}
@@ -2358,23 +2571,23 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
{
switch (cr) {
case 0:
- set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
+ kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
*rflags = kvm_x86_ops->get_rflags(vcpu);
break;
case 2:
vcpu->arch.cr2 = val;
break;
case 3:
- set_cr3(vcpu, val);
+ kvm_set_cr3(vcpu, val);
break;
case 4:
- set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
+ kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
break;
case 8:
- set_cr8(vcpu, val & 0xfUL);
+ kvm_set_cr8(vcpu, val & 0xfUL);
break;
default:
- vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+ vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
}
}
@@ -2447,6 +2660,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
}
kvm_x86_ops->decache_regs(vcpu);
kvm_x86_ops->skip_emulated_instruction(vcpu);
+ KVMTRACE_5D(CPUID, vcpu, function,
+ (u32)vcpu->arch.regs[VCPU_REGS_RAX],
+ (u32)vcpu->arch.regs[VCPU_REGS_RBX],
+ (u32)vcpu->arch.regs[VCPU_REGS_RCX],
+ (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
@@ -2469,7 +2687,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
- kvm_run->cr8 = get_cr8(vcpu);
+ kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
kvm_run->ready_for_interrupt_injection = 1;
@@ -2509,16 +2727,17 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
- if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+ if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
pr_debug("vcpu %d received sipi with vector # %x\n",
vcpu->vcpu_id, vcpu->arch.sipi_vector);
kvm_lapic_reset(vcpu);
r = kvm_x86_ops->vcpu_reset(vcpu);
if (r)
return r;
- vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
+ down_read(&vcpu->kvm->slots_lock);
vapic_enter(vcpu);
preempted:
@@ -2526,6 +2745,10 @@ preempted:
kvm_x86_ops->guest_debug_pre(vcpu);
again:
+ if (vcpu->requests)
+ if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+ kvm_mmu_unload(vcpu);
+
r = kvm_mmu_reload(vcpu);
if (unlikely(r))
goto out;
@@ -2539,6 +2762,11 @@ again:
r = 0;
goto out;
}
+ if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
+ kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+ r = 0;
+ goto out;
+ }
}
kvm_inject_pending_timer_irqs(vcpu);
@@ -2557,6 +2785,14 @@ again:
goto out;
}
+ if (vcpu->requests)
+ if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
+ local_irq_enable();
+ preempt_enable();
+ r = 1;
+ goto out;
+ }
+
if (signal_pending(current)) {
local_irq_enable();
preempt_enable();
@@ -2566,6 +2802,13 @@ again:
goto out;
}
+ vcpu->guest_mode = 1;
+ /*
+ * Make sure that guest_mode assignment won't happen after
+ * testing the pending IRQ vector bitmap.
+ */
+ smp_wmb();
+
if (vcpu->arch.exception.pending)
__queue_exception(vcpu);
else if (irqchip_in_kernel(vcpu->kvm))
@@ -2575,13 +2818,15 @@ again:
kvm_lapic_sync_to_vapic(vcpu);
- vcpu->guest_mode = 1;
+ up_read(&vcpu->kvm->slots_lock);
+
kvm_guest_enter();
if (vcpu->requests)
if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
kvm_x86_ops->tlb_flush(vcpu);
+ KVMTRACE_0D(VMENTRY, vcpu, entryexit);
kvm_x86_ops->run(vcpu, kvm_run);
vcpu->guest_mode = 0;
@@ -2601,6 +2846,8 @@ again:
preempt_enable();
+ down_read(&vcpu->kvm->slots_lock);
+
/*
* Profile KVM exit RIPs:
*/
@@ -2628,14 +2875,18 @@ again:
}
out:
+ up_read(&vcpu->kvm->slots_lock);
if (r > 0) {
kvm_resched(vcpu);
+ down_read(&vcpu->kvm->slots_lock);
goto preempted;
}
post_kvm_run_save(vcpu, kvm_run);
+ down_read(&vcpu->kvm->slots_lock);
vapic_exit(vcpu);
+ up_read(&vcpu->kvm->slots_lock);
return r;
}
@@ -2647,7 +2898,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu_load(vcpu);
- if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+ if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu);
vcpu_put(vcpu);
return -EAGAIN;
@@ -2658,7 +2909,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* re-sync apic's tpr */
if (!irqchip_in_kernel(vcpu->kvm))
- set_cr8(vcpu, kvm_run->cr8);
+ kvm_set_cr8(vcpu, kvm_run->cr8);
if (vcpu->arch.pio.cur_count) {
r = complete_pio(vcpu);
@@ -2670,9 +2921,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
vcpu->mmio_read_completed = 1;
vcpu->mmio_needed = 0;
+
+ down_read(&vcpu->kvm->slots_lock);
r = emulate_instruction(vcpu, kvm_run,
vcpu->arch.mmio_fault_cr2, 0,
EMULTYPE_NO_DECODE);
+ up_read(&vcpu->kvm->slots_lock);
if (r == EMULATE_DO_MMIO) {
/*
* Read-modify-write. Back to userspace.
@@ -2773,7 +3027,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
static void get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
- return kvm_x86_ops->get_segment(vcpu, var, seg);
+ kvm_x86_ops->get_segment(vcpu, var, seg);
}
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -2816,7 +3070,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
sregs->cr2 = vcpu->arch.cr2;
sregs->cr3 = vcpu->arch.cr3;
sregs->cr4 = vcpu->arch.cr4;
- sregs->cr8 = get_cr8(vcpu);
+ sregs->cr8 = kvm_get_cr8(vcpu);
sregs->efer = vcpu->arch.shadow_efer;
sregs->apic_base = kvm_get_apic_base(vcpu);
@@ -2836,12 +3090,438 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
return 0;
}
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ vcpu_load(vcpu);
+ mp_state->mp_state = vcpu->arch.mp_state;
+ vcpu_put(vcpu);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ vcpu_load(vcpu);
+ vcpu->arch.mp_state = mp_state->mp_state;
+ vcpu_put(vcpu);
+ return 0;
+}
+
static void set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
- return kvm_x86_ops->set_segment(vcpu, var, seg);
+ kvm_x86_ops->set_segment(vcpu, var, seg);
+}
+
+static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
+ struct kvm_segment *kvm_desct)
+{
+ kvm_desct->base = seg_desc->base0;
+ kvm_desct->base |= seg_desc->base1 << 16;
+ kvm_desct->base |= seg_desc->base2 << 24;
+ kvm_desct->limit = seg_desc->limit0;
+ kvm_desct->limit |= seg_desc->limit << 16;
+ kvm_desct->selector = selector;
+ kvm_desct->type = seg_desc->type;
+ kvm_desct->present = seg_desc->p;
+ kvm_desct->dpl = seg_desc->dpl;
+ kvm_desct->db = seg_desc->d;
+ kvm_desct->s = seg_desc->s;
+ kvm_desct->l = seg_desc->l;
+ kvm_desct->g = seg_desc->g;
+ kvm_desct->avl = seg_desc->avl;
+ if (!selector)
+ kvm_desct->unusable = 1;
+ else
+ kvm_desct->unusable = 0;
+ kvm_desct->padding = 0;
+}
+
+static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
+ u16 selector,
+ struct descriptor_table *dtable)
+{
+ if (selector & 1 << 2) {
+ struct kvm_segment kvm_seg;
+
+ get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
+
+ if (kvm_seg.unusable)
+ dtable->limit = 0;
+ else
+ dtable->limit = kvm_seg.limit;
+ dtable->base = kvm_seg.base;
+ }
+ else
+ kvm_x86_ops->get_gdt(vcpu, dtable);
+}
+
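
The selector arithmetic used here follows the x86 segment-selector layout: bits 0-1 carry the requested privilege level, bit 2 selects the LDT over the GDT (the "selector & 1 << 2" test above), and the upper bits index 8-byte descriptors (hence the "index * 8 + 7" limit check below). A standalone sketch of that decoding, with a made-up selector value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t selector = 0x002b;           /* arbitrary example */
            unsigned rpl   = selector & 3;        /* requested privilege level */
            unsigned ti    = (selector >> 2) & 1; /* 1 = LDT, 0 = GDT */
            unsigned index = selector >> 3;       /* descriptor table index */

            /* byte offset of the 8-byte descriptor within the table */
            printf("rpl=%u table=%s offset=%u\n", rpl, ti ? "LDT" : "GDT", index * 8);
            return 0;
    }
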
+/* allowed only for 8-byte segment descriptors */
+static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+ struct desc_struct *seg_desc)
+{
+ struct descriptor_table dtable;
+ u16 index = selector >> 3;
+
+ get_segment_descritptor_dtable(vcpu, selector, &dtable);
+
+ if (dtable.limit < index * 8 + 7) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
+ return 1;
+ }
+ return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+}
+
+/* allowed only for 8-byte segment descriptors */
+static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+ struct desc_struct *seg_desc)
+{
+ struct descriptor_table dtable;
+ u16 index = selector >> 3;
+
+ get_segment_descritptor_dtable(vcpu, selector, &dtable);
+
+ if (dtable.limit < index * 8 + 7)
+ return 1;
+ return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+}
+
+static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc)
+{
+ u32 base_addr;
+
+ base_addr = seg_desc->base0;
+ base_addr |= (seg_desc->base1 << 16);
+ base_addr |= (seg_desc->base2 << 24);
+
+ return base_addr;
+}
+
+static int load_tss_segment32(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc,
+ struct tss_segment_32 *tss)
+{
+ u32 base_addr;
+
+ base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+ return kvm_read_guest(vcpu->kvm, base_addr, tss,
+ sizeof(struct tss_segment_32));
+}
+
+static int save_tss_segment32(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc,
+ struct tss_segment_32 *tss)
+{
+ u32 base_addr;
+
+ base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+ return kvm_write_guest(vcpu->kvm, base_addr, tss,
+ sizeof(struct tss_segment_32));
+}
+
+static int load_tss_segment16(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc,
+ struct tss_segment_16 *tss)
+{
+ u32 base_addr;
+
+ base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+ return kvm_read_guest(vcpu->kvm, base_addr, tss,
+ sizeof(struct tss_segment_16));
+}
+
+static int save_tss_segment16(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc,
+ struct tss_segment_16 *tss)
+{
+ u32 base_addr;
+
+ base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+ return kvm_write_guest(vcpu->kvm, base_addr, tss,
+ sizeof(struct tss_segment_16));
+}
+
+static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
+{
+ struct kvm_segment kvm_seg;
+
+ get_segment(vcpu, &kvm_seg, seg);
+ return kvm_seg.selector;
+}
+
+static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
+ u16 selector,
+ struct kvm_segment *kvm_seg)
+{
+ struct desc_struct seg_desc;
+
+ if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
+ return 1;
+ seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
+ return 0;
+}
+
+static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+ int type_bits, int seg)
+{
+ struct kvm_segment kvm_seg;
+
+ if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
+ return 1;
+ kvm_seg.type |= type_bits;
+
+ if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
+ seg != VCPU_SREG_LDTR)
+ if (!kvm_seg.s)
+ kvm_seg.unusable = 1;
+
+ set_segment(vcpu, &kvm_seg, seg);
+ return 0;
+}
+
+static void save_state_to_tss32(struct kvm_vcpu *vcpu,
+ struct tss_segment_32 *tss)
+{
+ tss->cr3 = vcpu->arch.cr3;
+ tss->eip = vcpu->arch.rip;
+ tss->eflags = kvm_x86_ops->get_rflags(vcpu);
+ tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
+ tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+ tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
+ tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
+ tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
+ tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
+ tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
+ tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
+
+ tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
+ tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
+ tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
+ tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
+ tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
+ tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
+ tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
+ tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
+}
+
+static int load_state_from_tss32(struct kvm_vcpu *vcpu,
+ struct tss_segment_32 *tss)
+{
+ kvm_set_cr3(vcpu, tss->cr3);
+
+ vcpu->arch.rip = tss->eip;
+ kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
+
+ vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
+ vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
+ vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
+ vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
+ vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
+ vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
+ vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
+ vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+
+ if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
+ return 1;
+ return 0;
+}
+
+static void save_state_to_tss16(struct kvm_vcpu *vcpu,
+ struct tss_segment_16 *tss)
+{
+ tss->ip = vcpu->arch.rip;
+ tss->flag = kvm_x86_ops->get_rflags(vcpu);
+ tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
+ tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
+ tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
+ tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
+ tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
+ tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
+ tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
+ tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+
+ tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
+ tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
+ tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
+ tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
+ tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
+ tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
+}
+
+static int load_state_from_tss16(struct kvm_vcpu *vcpu,
+ struct tss_segment_16 *tss)
+{
+ vcpu->arch.rip = tss->ip;
+ kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
+ vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
+ vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
+ vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
+ vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
+ vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
+ vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
+ vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
+ vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+
+ if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+ return 1;
+
+ if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+ return 1;
+ return 0;
+}
+
+int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
+ struct desc_struct *cseg_desc,
+ struct desc_struct *nseg_desc)
+{
+ struct tss_segment_16 tss_segment_16;
+ int ret = 0;
+
+ if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+ goto out;
+
+ save_state_to_tss16(vcpu, &tss_segment_16);
+ save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
+
+ if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+ goto out;
+ if (load_state_from_tss16(vcpu, &tss_segment_16))
+ goto out;
+
+ ret = 1;
+out:
+ return ret;
+}
+
+int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
+ struct desc_struct *cseg_desc,
+ struct desc_struct *nseg_desc)
+{
+ struct tss_segment_32 tss_segment_32;
+ int ret = 0;
+
+ if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+ goto out;
+
+ save_state_to_tss32(vcpu, &tss_segment_32);
+ save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
+
+ if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+ goto out;
+ if (load_state_from_tss32(vcpu, &tss_segment_32))
+ goto out;
+
+ ret = 1;
+out:
+ return ret;
}
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+{
+ struct kvm_segment tr_seg;
+ struct desc_struct cseg_desc;
+ struct desc_struct nseg_desc;
+ int ret = 0;
+
+ get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+
+ if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
+ goto out;
+
+ if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+ goto out;
+
+ if (reason != TASK_SWITCH_IRET) {
+ int cpl;
+
+ cpl = kvm_x86_ops->get_cpl(vcpu);
+ if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
+ }
+
+ if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
+ kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
+ return 1;
+ }
+
+ if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
+ cseg_desc.type &= ~(1 << 8); /* clear the busy (B) flag */
+ save_guest_segment_descriptor(vcpu, tr_seg.selector,
+ &cseg_desc);
+ }
+
+ if (reason == TASK_SWITCH_IRET) {
+ u32 eflags = kvm_x86_ops->get_rflags(vcpu);
+ kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
+ }
+
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ kvm_x86_ops->cache_regs(vcpu);
+
+ if (nseg_desc.type & 8)
+ ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
+ &nseg_desc);
+ else
+ ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
+ &nseg_desc);
+
+ if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
+ u32 eflags = kvm_x86_ops->get_rflags(vcpu);
+ kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
+ }
+
+ if (reason != TASK_SWITCH_IRET) {
+ nseg_desc.type |= (1 << 8);
+ save_guest_segment_descriptor(vcpu, tss_selector,
+ &nseg_desc);
+ }
+
+ kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
+ seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
+ tr_seg.type = 11;
+ set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+out:
+ kvm_x86_ops->decache_regs(vcpu);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_task_switch);
+
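
Both seg_desct_to_kvm_desct() and get_tss_base_addr() above rebuild a linear base address from the three base fields that legacy descriptors scatter across the 8-byte entry. That bit-stitching is easy to check in isolation; a sketch using an assumed split of 0xdeadbeef:

    #include <assert.h>
    #include <stdint.h>

    /* Reassemble base0 (bits 0-15), base1 (bits 16-23), base2 (bits 24-31). */
    static uint32_t desc_base(uint16_t base0, uint8_t base1, uint8_t base2)
    {
            return (uint32_t)base0 | ((uint32_t)base1 << 16) | ((uint32_t)base2 << 24);
    }

    int main(void)
    {
            /* 0xdeadbeef stored the way the hardware lays it out */
            assert(desc_base(0xbeef, 0xad, 0xde) == 0xdeadbeefu);
            return 0;
    }
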
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
@@ -2862,12 +3542,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
- set_cr8(vcpu, sregs->cr8);
+ kvm_set_cr8(vcpu, sregs->cr8);
mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
-#ifdef CONFIG_X86_64
kvm_x86_ops->set_efer(vcpu, sregs->efer);
-#endif
kvm_set_apic_base(vcpu, sregs->apic_base);
kvm_x86_ops->decache_cr4_guest_bits(vcpu);
@@ -3141,9 +3819,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
- vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
- vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
+ vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
@@ -3175,7 +3853,9 @@ fail:
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
kvm_free_lapic(vcpu);
+ down_read(&vcpu->kvm->slots_lock);
kvm_mmu_destroy(vcpu);
+ up_read(&vcpu->kvm->slots_lock);
free_page((unsigned long)vcpu->arch.pio_data);
}
@@ -3219,10 +3899,13 @@ static void kvm_free_vcpus(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
+ kvm_free_pit(kvm);
kfree(kvm->arch.vpic);
kfree(kvm->arch.vioapic);
kvm_free_vcpus(kvm);
kvm_free_physmem(kvm);
+ if (kvm->arch.apic_access_page)
+ put_page(kvm->arch.apic_access_page);
kfree(kvm);
}
@@ -3278,8 +3961,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
- || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
+ return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+ || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
}
static void vcpu_kick_intr(void *info)
@@ -3293,11 +3976,17 @@ static void vcpu_kick_intr(void *info)
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int ipi_pcpu = vcpu->cpu;
+ int cpu = get_cpu();
if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq);
++vcpu->stat.halt_wakeup;
}
- if (vcpu->guest_mode)
+ /*
+ * We may be called synchronously with irqs disabled in guest mode,
+ * so there is no need to call smp_call_function_single() in that case.
+ */
+ if (vcpu->guest_mode && vcpu->cpu != cpu)
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+ put_cpu();
}
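
The smp_wmb() added to __vcpu_run() pairs with this kick path: the vcpu thread publishes guest_mode before sampling pending interrupts, and the kicker raises the interrupt before sampling guest_mode, so at least one side always observes the other and the IPI is never lost. A reduced two-thread sketch of the handshake, with C11 seq_cst atomics standing in for the kernel's barriers (a simplification, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool guest_mode;    /* stands in for vcpu->guest_mode */
    static atomic_bool irq_pending;   /* stands in for the pending IRQ bitmap */

    /* vcpu side, mirroring __vcpu_run(): publish first, then test */
    bool vcpu_enter(void)
    {
            atomic_store(&guest_mode, true);
            if (atomic_load(&irq_pending))
                    return false;     /* handle the interrupt before entry */
            return true;              /* safe to enter the guest */
    }

    /* kicker side, mirroring kvm_vcpu_kick(): raise first, then test */
    bool kick_needs_ipi(void)
    {
            atomic_store(&irq_pending, true);
            return atomic_load(&guest_mode);  /* true => send the IPI */
    }
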
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index 79586003397..2ca08386f99 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -65,6 +65,14 @@
#define MemAbs (1<<9) /* Memory operand is absolute displacement */
#define String (1<<10) /* String instruction (rep capable) */
#define Stack (1<<11) /* Stack instruction (push/pop) */
+#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
+#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
+#define GroupMask 0xff /* Group number stored in bits 0:7 */
+
+enum {
+ Group1_80, Group1_81, Group1_82, Group1_83,
+ Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
+};
static u16 opcode_table[256] = {
/* 0x00 - 0x07 */
@@ -123,14 +131,14 @@ static u16 opcode_table[256] = {
ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
/* 0x80 - 0x87 */
- ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
- ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
+ Group | Group1_80, Group | Group1_81,
+ Group | Group1_82, Group | Group1_83,
ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
/* 0x88 - 0x8F */
ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
- 0, ModRM | DstReg, 0, DstMem | SrcNone | ModRM | Mov | Stack,
+ 0, ModRM | DstReg, 0, Group | Group1A,
/* 0x90 - 0x9F */
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
@@ -164,16 +172,15 @@ static u16 opcode_table[256] = {
0, 0, 0, 0,
/* 0xF0 - 0xF7 */
0, 0, 0, 0,
- ImplicitOps, ImplicitOps,
- ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
+ ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3,
/* 0xF8 - 0xFF */
ImplicitOps, 0, ImplicitOps, ImplicitOps,
- 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
+ 0, 0, Group | Group4, Group | Group5,
};
static u16 twobyte_table[256] = {
/* 0x00 - 0x0F */
- 0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
+ 0, Group | GroupDual | Group7, 0, 0, 0, 0, ImplicitOps, 0,
ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
/* 0x10 - 0x1F */
0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
@@ -229,6 +236,56 @@ static u16 twobyte_table[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
+static u16 group_table[] = {
+ [Group1_80*8] =
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ [Group1_81*8] =
+ DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+ DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+ DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+ DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+ [Group1_82*8] =
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+ [Group1_83*8] =
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+ [Group1A*8] =
+ DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
+ [Group3_Byte*8] =
+ ByteOp | SrcImm | DstMem | ModRM, 0,
+ ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
+ 0, 0, 0, 0,
+ [Group3*8] =
+ DstMem | SrcImm | ModRM | SrcImm, 0,
+ DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
+ 0, 0, 0, 0,
+ [Group4*8] =
+ ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
+ 0, 0, 0, 0, 0, 0,
+ [Group5*8] =
+ DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, 0, 0,
+ SrcMem | ModRM, 0, SrcMem | ModRM | Stack, 0,
+ [Group7*8] =
+ 0, 0, ModRM | SrcMem, ModRM | SrcMem,
+ SrcNone | ModRM | DstMem | Mov, 0,
+ SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp,
+};
+
+static u16 group2_table[] = {
+ [Group7*8] =
+ SrcNone | ModRM, 0, 0, 0,
+ SrcNone | ModRM | DstMem | Mov, 0,
+ SrcMem16 | ModRM | Mov, 0,
+};
+
/* EFLAGS bit definitions. */
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
@@ -317,7 +374,7 @@ static u16 twobyte_table[256] = {
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
do { \
- unsigned long _tmp; \
+ unsigned long __tmp; \
switch ((_dst).bytes) { \
case 1: \
__asm__ __volatile__ ( \
@@ -325,7 +382,7 @@ static u16 twobyte_table[256] = {
_op"b %"_bx"3,%1; " \
_POST_EFLAGS("0", "4", "2") \
: "=m" (_eflags), "=m" ((_dst).val), \
- "=&r" (_tmp) \
+ "=&r" (__tmp) \
: _by ((_src).val), "i" (EFLAGS_MASK)); \
break; \
default: \
@@ -426,29 +483,40 @@ static u16 twobyte_table[256] = {
(_type)_x; \
})
+static inline unsigned long ad_mask(struct decode_cache *c)
+{
+ return (1UL << (c->ad_bytes << 3)) - 1;
+}
+
/* Access/update address held in a register, based on addressing mode. */
-#define address_mask(reg) \
- ((c->ad_bytes == sizeof(unsigned long)) ? \
- (reg) : ((reg) & ((1UL << (c->ad_bytes << 3)) - 1)))
-#define register_address(base, reg) \
- ((base) + address_mask(reg))
-#define register_address_increment(reg, inc) \
- do { \
- /* signed type ensures sign extension to long */ \
- int _inc = (inc); \
- if (c->ad_bytes == sizeof(unsigned long)) \
- (reg) += _inc; \
- else \
- (reg) = ((reg) & \
- ~((1UL << (c->ad_bytes << 3)) - 1)) | \
- (((reg) + _inc) & \
- ((1UL << (c->ad_bytes << 3)) - 1)); \
- } while (0)
+static inline unsigned long
+address_mask(struct decode_cache *c, unsigned long reg)
+{
+ if (c->ad_bytes == sizeof(unsigned long))
+ return reg;
+ else
+ return reg & ad_mask(c);
+}
-#define JMP_REL(rel) \
- do { \
- register_address_increment(c->eip, rel); \
- } while (0)
+static inline unsigned long
+register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
+{
+ return base + address_mask(c, reg);
+}
+
+static inline void
+register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
+{
+ if (c->ad_bytes == sizeof(unsigned long))
+ *reg += inc;
+ else
+ *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
+}
+
+static inline void jmp_rel(struct decode_cache *c, int rel)
+{
+ register_address_increment(c, &c->eip, rel);
+}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
@@ -763,7 +831,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
struct decode_cache *c = &ctxt->decode;
int rc = 0;
int mode = ctxt->mode;
- int def_op_bytes, def_ad_bytes;
+ int def_op_bytes, def_ad_bytes, group;
/* Shadow copy of register state. Committed on successful emulation. */
@@ -864,12 +932,24 @@ done_prefixes:
c->b = insn_fetch(u8, 1, c->eip);
c->d = twobyte_table[c->b];
}
+ }
- /* Unrecognised? */
- if (c->d == 0) {
- DPRINTF("Cannot emulate %02x\n", c->b);
- return -1;
- }
+ if (c->d & Group) {
+ group = c->d & GroupMask;
+ c->modrm = insn_fetch(u8, 1, c->eip);
+ --c->eip;
+
+ group = (group << 3) + ((c->modrm >> 3) & 7);
+ if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
+ c->d = group2_table[group];
+ else
+ c->d = group_table[group];
+ }
+
+ /* Unrecognised? */
+ if (c->d == 0) {
+ DPRINTF("Cannot emulate %02x\n", c->b);
+ return -1;
}
if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
@@ -924,6 +1004,7 @@ done_prefixes:
*/
if ((c->d & ModRM) && c->modrm_mod == 3) {
c->src.type = OP_REG;
+ c->src.val = c->modrm_val;
break;
}
c->src.type = OP_MEM;
@@ -967,6 +1048,7 @@ done_prefixes:
case DstMem:
if ((c->d & ModRM) && c->modrm_mod == 3) {
c->dst.type = OP_REG;
+ c->dst.val = c->dst.orig_val = c->modrm_val;
break;
}
c->dst.type = OP_MEM;
@@ -984,8 +1066,8 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
c->dst.type = OP_MEM;
c->dst.bytes = c->op_bytes;
c->dst.val = c->src.val;
- register_address_increment(c->regs[VCPU_REGS_RSP], -c->op_bytes);
- c->dst.ptr = (void *) register_address(ctxt->ss_base,
+ register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
+ c->dst.ptr = (void *) register_address(c, ctxt->ss_base,
c->regs[VCPU_REGS_RSP]);
}
@@ -995,13 +1077,13 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
struct decode_cache *c = &ctxt->decode;
int rc;
- rc = ops->read_std(register_address(ctxt->ss_base,
+ rc = ops->read_std(register_address(c, ctxt->ss_base,
c->regs[VCPU_REGS_RSP]),
&c->dst.val, c->dst.bytes, ctxt->vcpu);
if (rc != 0)
return rc;
- register_address_increment(c->regs[VCPU_REGS_RSP], c->dst.bytes);
+ register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->dst.bytes);
return 0;
}
@@ -1043,26 +1125,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
switch (c->modrm_reg) {
case 0 ... 1: /* test */
- /*
- * Special case in Grp3: test has an immediate
- * source operand.
- */
- c->src.type = OP_IMM;
- c->src.ptr = (unsigned long *)c->eip;
- c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- if (c->src.bytes == 8)
- c->src.bytes = 4;
- switch (c->src.bytes) {
- case 1:
- c->src.val = insn_fetch(s8, 1, c->eip);
- break;
- case 2:
- c->src.val = insn_fetch(s16, 2, c->eip);
- break;
- case 4:
- c->src.val = insn_fetch(s32, 4, c->eip);
- break;
- }
emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
break;
case 2: /* not */
@@ -1076,7 +1138,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
rc = X86EMUL_UNHANDLEABLE;
break;
}
-done:
return rc;
}
@@ -1084,7 +1145,6 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc;
switch (c->modrm_reg) {
case 0: /* inc */
@@ -1094,36 +1154,11 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
emulate_1op("dec", c->dst, ctxt->eflags);
break;
case 4: /* jmp abs */
- if (c->b == 0xff)
- c->eip = c->dst.val;
- else {
- DPRINTF("Cannot emulate %02x\n", c->b);
- return X86EMUL_UNHANDLEABLE;
- }
+ c->eip = c->src.val;
break;
case 6: /* push */
-
- /* 64-bit mode: PUSH always pushes a 64-bit operand. */
-
- if (ctxt->mode == X86EMUL_MODE_PROT64) {
- c->dst.bytes = 8;
- rc = ops->read_std((unsigned long)c->dst.ptr,
- &c->dst.val, 8, ctxt->vcpu);
- if (rc != 0)
- return rc;
- }
- register_address_increment(c->regs[VCPU_REGS_RSP],
- -c->dst.bytes);
- rc = ops->write_emulated(register_address(ctxt->ss_base,
- c->regs[VCPU_REGS_RSP]), &c->dst.val,
- c->dst.bytes, ctxt->vcpu);
- if (rc != 0)
- return rc;
- c->dst.type = OP_NONE;
+ emulate_push(ctxt);
break;
- default:
- DPRINTF("Cannot emulate %02x\n", c->b);
- return X86EMUL_UNHANDLEABLE;
}
return 0;
}
@@ -1361,19 +1396,19 @@ special_insn:
c->dst.type = OP_MEM;
c->dst.bytes = c->op_bytes;
c->dst.val = c->src.val;
- register_address_increment(c->regs[VCPU_REGS_RSP],
+ register_address_increment(c, &c->regs[VCPU_REGS_RSP],
-c->op_bytes);
c->dst.ptr = (void *) register_address(
- ctxt->ss_base, c->regs[VCPU_REGS_RSP]);
+ c, ctxt->ss_base, c->regs[VCPU_REGS_RSP]);
break;
case 0x58 ... 0x5f: /* pop reg */
pop_instruction:
- if ((rc = ops->read_std(register_address(ctxt->ss_base,
+ if ((rc = ops->read_std(register_address(c, ctxt->ss_base,
c->regs[VCPU_REGS_RSP]), c->dst.ptr,
c->op_bytes, ctxt->vcpu)) != 0)
goto done;
- register_address_increment(c->regs[VCPU_REGS_RSP],
+ register_address_increment(c, &c->regs[VCPU_REGS_RSP],
c->op_bytes);
c->dst.type = OP_NONE; /* Disable writeback. */
break;
@@ -1393,9 +1428,9 @@ special_insn:
1,
(c->d & ByteOp) ? 1 : c->op_bytes,
c->rep_prefix ?
- address_mask(c->regs[VCPU_REGS_RCX]) : 1,
+ address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
(ctxt->eflags & EFLG_DF),
- register_address(ctxt->es_base,
+ register_address(c, ctxt->es_base,
c->regs[VCPU_REGS_RDI]),
c->rep_prefix,
c->regs[VCPU_REGS_RDX]) == 0) {
@@ -1409,9 +1444,9 @@ special_insn:
0,
(c->d & ByteOp) ? 1 : c->op_bytes,
c->rep_prefix ?
- address_mask(c->regs[VCPU_REGS_RCX]) : 1,
+ address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
(ctxt->eflags & EFLG_DF),
- register_address(c->override_base ?
+ register_address(c, c->override_base ?
*c->override_base :
ctxt->ds_base,
c->regs[VCPU_REGS_RSI]),
@@ -1425,7 +1460,7 @@ special_insn:
int rel = insn_fetch(s8, 1, c->eip);
if (test_cc(c->b, ctxt->eflags))
- JMP_REL(rel);
+ jmp_rel(c, rel);
break;
}
case 0x80 ... 0x83: /* Grp1 */
@@ -1477,7 +1512,7 @@ special_insn:
case 0x88 ... 0x8b: /* mov */
goto mov;
case 0x8d: /* lea r16/r32, m */
- c->dst.val = c->modrm_val;
+ c->dst.val = c->modrm_ea;
break;
case 0x8f: /* pop (sole member of Grp1a) */
rc = emulate_grp1a(ctxt, ops);
@@ -1501,27 +1536,27 @@ special_insn:
case 0xa4 ... 0xa5: /* movs */
c->dst.type = OP_MEM;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(
+ c->dst.ptr = (unsigned long *)register_address(c,
ctxt->es_base,
c->regs[VCPU_REGS_RDI]);
- if ((rc = ops->read_emulated(register_address(
+ if ((rc = ops->read_emulated(register_address(c,
c->override_base ? *c->override_base :
ctxt->ds_base,
c->regs[VCPU_REGS_RSI]),
&c->dst.val,
c->dst.bytes, ctxt->vcpu)) != 0)
goto done;
- register_address_increment(c->regs[VCPU_REGS_RSI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RSI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
: c->dst.bytes);
- register_address_increment(c->regs[VCPU_REGS_RDI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RDI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
: c->dst.bytes);
break;
case 0xa6 ... 0xa7: /* cmps */
c->src.type = OP_NONE; /* Disable writeback. */
c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->src.ptr = (unsigned long *)register_address(
+ c->src.ptr = (unsigned long *)register_address(c,
c->override_base ? *c->override_base :
ctxt->ds_base,
c->regs[VCPU_REGS_RSI]);
@@ -1533,7 +1568,7 @@ special_insn:
c->dst.type = OP_NONE; /* Disable writeback. */
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(
+ c->dst.ptr = (unsigned long *)register_address(c,
ctxt->es_base,
c->regs[VCPU_REGS_RDI]);
if ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
@@ -1546,10 +1581,10 @@ special_insn:
emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
- register_address_increment(c->regs[VCPU_REGS_RSI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RSI],
(ctxt->eflags & EFLG_DF) ? -c->src.bytes
: c->src.bytes);
- register_address_increment(c->regs[VCPU_REGS_RDI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RDI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
: c->dst.bytes);
@@ -1557,11 +1592,11 @@ special_insn:
case 0xaa ... 0xab: /* stos */
c->dst.type = OP_MEM;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(
+ c->dst.ptr = (unsigned long *)register_address(c,
ctxt->es_base,
c->regs[VCPU_REGS_RDI]);
c->dst.val = c->regs[VCPU_REGS_RAX];
- register_address_increment(c->regs[VCPU_REGS_RDI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RDI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
: c->dst.bytes);
break;
@@ -1569,7 +1604,7 @@ special_insn:
c->dst.type = OP_REG;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
- if ((rc = ops->read_emulated(register_address(
+ if ((rc = ops->read_emulated(register_address(c,
c->override_base ? *c->override_base :
ctxt->ds_base,
c->regs[VCPU_REGS_RSI]),
@@ -1577,7 +1612,7 @@ special_insn:
c->dst.bytes,
ctxt->vcpu)) != 0)
goto done;
- register_address_increment(c->regs[VCPU_REGS_RSI],
+ register_address_increment(c, &c->regs[VCPU_REGS_RSI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes
: c->dst.bytes);
break;
@@ -1616,14 +1651,14 @@ special_insn:
goto cannot_emulate;
}
c->src.val = (unsigned long) c->eip;
- JMP_REL(rel);
+ jmp_rel(c, rel);
c->op_bytes = c->ad_bytes;
emulate_push(ctxt);
break;
}
case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */
- JMP_REL(c->src.val);
+ jmp_rel(c, c->src.val);
c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
@@ -1690,6 +1725,8 @@ twobyte_insn:
goto done;
kvm_emulate_hypercall(ctxt->vcpu);
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
break;
case 2: /* lgdt */
rc = read_descriptor(ctxt, ops, c->src.ptr,
@@ -1697,6 +1734,8 @@ twobyte_insn:
if (rc)
goto done;
realmode_lgdt(ctxt->vcpu, size, address);
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
break;
case 3: /* lidt/vmmcall */
if (c->modrm_mod == 3 && c->modrm_rm == 1) {
@@ -1712,27 +1751,25 @@ twobyte_insn:
goto done;
realmode_lidt(ctxt->vcpu, size, address);
}
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
break;
case 4: /* smsw */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- *(u16 *)&c->regs[c->modrm_rm]
- = realmode_get_cr(ctxt->vcpu, 0);
+ c->dst.bytes = 2;
+ c->dst.val = realmode_get_cr(ctxt->vcpu, 0);
break;
case 6: /* lmsw */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- realmode_lmsw(ctxt->vcpu, (u16)c->modrm_val,
- &ctxt->eflags);
+ realmode_lmsw(ctxt->vcpu, (u16)c->src.val,
+ &ctxt->eflags);
break;
case 7: /* invlpg*/
emulate_invlpg(ctxt->vcpu, memop);
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
break;
default:
goto cannot_emulate;
}
- /* Disable writeback. */
- c->dst.type = OP_NONE;
break;
case 0x06:
emulate_clts(ctxt->vcpu);
@@ -1823,7 +1860,7 @@ twobyte_insn:
goto cannot_emulate;
}
if (test_cc(c->b, ctxt->eflags))
- JMP_REL(rel);
+ jmp_rel(c, rel);
c->dst.type = OP_NONE;
break;
}
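
The Group tables added here resolve opcodes whose real operation lives in the reg field (bits 3:5) of the ModRM byte: the decoder reserves eight entries per group and folds the group number and reg field into one index, exactly the "group = (group << 3) + ((c->modrm >> 3) & 7)" line above. The indexing on its own:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the decoder's group-table indexing. */
    static unsigned group_index(unsigned group, uint8_t modrm)
    {
            return (group << 3) + ((modrm >> 3) & 7);
    }

    int main(void)
    {
            /* opcode 0xf7 selects Group3 (enum value 6); ModRM 0xd0 has
             * reg field 2, which is "not" in that group */
            printf("group_table[%u]\n", group_index(6, 0xd0));  /* prints 50 */
            return 0;
    }
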
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 25df1c1989f..76f60f52a88 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -11,7 +11,7 @@ lib-y += memcpy_$(BITS).o
ifeq ($(CONFIG_X86_32),y)
lib-y += checksum_32.o
lib-y += strstr_32.o
- lib-y += bitops_32.o semaphore_32.o string_32.o
+ lib-y += semaphore_32.o string_32.o
lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
else
@@ -21,7 +21,6 @@ else
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
lib-y += thunk_64.o clear_page_64.o copy_page_64.o
- lib-y += bitops_64.o
lib-y += memmove_64.o memset_64.o
lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
endif
diff --git a/arch/x86/lib/bitops_32.c b/arch/x86/lib/bitops_32.c
deleted file mode 100644
index b6544045985..00000000000
--- a/arch/x86/lib/bitops_32.c
+++ /dev/null
@@ -1,70 +0,0 @@
-#include <linux/bitops.h>
-#include <linux/module.h>
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-int find_next_bit(const unsigned long *addr, int size, int offset)
-{
- const unsigned long *p = addr + (offset >> 5);
- int set = 0, bit = offset & 31, res;
-
- if (bit) {
- /*
- * Look for nonzero in the first 32 bits:
- */
- __asm__("bsfl %1,%0\n\t"
- "jne 1f\n\t"
- "movl $32, %0\n"
- "1:"
- : "=r" (set)
- : "r" (*p >> bit));
- if (set < (32 - bit))
- return set + offset;
- set = 32 - bit;
- p++;
- }
- /*
- * No set bit yet, search remaining full words for a bit
- */
- res = find_first_bit (p, size - 32 * (p - addr));
- return (offset + set + res);
-}
-EXPORT_SYMBOL(find_next_bit);
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-int find_next_zero_bit(const unsigned long *addr, int size, int offset)
-{
- const unsigned long *p = addr + (offset >> 5);
- int set = 0, bit = offset & 31, res;
-
- if (bit) {
- /*
- * Look for zero in the first 32 bits.
- */
- __asm__("bsfl %1,%0\n\t"
- "jne 1f\n\t"
- "movl $32, %0\n"
- "1:"
- : "=r" (set)
- : "r" (~(*p >> bit)));
- if (set < (32 - bit))
- return set + offset;
- set = 32 - bit;
- p++;
- }
- /*
- * No zero yet, search remaining full bytes for a zero
- */
- res = find_first_zero_bit(p, size - 32 * (p - addr));
- return (offset + set + res);
-}
-EXPORT_SYMBOL(find_next_zero_bit);
diff --git a/arch/x86/lib/bitops_64.c b/arch/x86/lib/bitops_64.c
deleted file mode 100644
index 0e8f491e6cc..00000000000
--- a/arch/x86/lib/bitops_64.c
+++ /dev/null
@@ -1,175 +0,0 @@
-#include <linux/bitops.h>
-
-#undef find_first_zero_bit
-#undef find_next_zero_bit
-#undef find_first_bit
-#undef find_next_bit
-
-static inline long
-__find_first_zero_bit(const unsigned long * addr, unsigned long size)
-{
- long d0, d1, d2;
- long res;
-
- /*
- * We must test the size in words, not in bits, because
- * otherwise incoming sizes in the range -63..-1 will not run
- * any scasq instructions, and then the flags used by the je
- * instruction will have whatever random value was in place
- * before. Nobody should call us like that, but
- * find_next_zero_bit() does when offset and size are at the
- * same word and it fails to find a zero itself.
- */
- size += 63;
- size >>= 6;
- if (!size)
- return 0;
- asm volatile(
- " repe; scasq\n"
- " je 1f\n"
- " xorq -8(%%rdi),%%rax\n"
- " subq $8,%%rdi\n"
- " bsfq %%rax,%%rdx\n"
- "1: subq %[addr],%%rdi\n"
- " shlq $3,%%rdi\n"
- " addq %%rdi,%%rdx"
- :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
- :"0" (0ULL), "1" (size), "2" (addr), "3" (-1ULL),
- [addr] "S" (addr) : "memory");
- /*
- * Any register would do for [addr] above, but GCC tends to
- * prefer rbx over rsi, even though rsi is readily available
- * and doesn't have to be saved.
- */
- return res;
-}
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-long find_first_zero_bit(const unsigned long * addr, unsigned long size)
-{
- return __find_first_zero_bit (addr, size);
-}
-
-/**
- * find_next_zero_bit - find the next zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-long find_next_zero_bit (const unsigned long * addr, long size, long offset)
-{
- const unsigned long * p = addr + (offset >> 6);
- unsigned long set = 0;
- unsigned long res, bit = offset&63;
-
- if (bit) {
- /*
- * Look for zero in first word
- */
- asm("bsfq %1,%0\n\t"
- "cmoveq %2,%0"
- : "=r" (set)
- : "r" (~(*p >> bit)), "r"(64L));
- if (set < (64 - bit))
- return set + offset;
- set = 64 - bit;
- p++;
- }
- /*
- * No zero yet, search remaining full words for a zero
- */
- res = __find_first_zero_bit (p, size - 64 * (p - addr));
-
- return (offset + set + res);
-}
-
-static inline long
-__find_first_bit(const unsigned long * addr, unsigned long size)
-{
- long d0, d1;
- long res;
-
- /*
- * We must test the size in words, not in bits, because
- * otherwise incoming sizes in the range -63..-1 will not run
- * any scasq instructions, and then the flags used by the jz
- * instruction will have whatever random value was in place
- * before. Nobody should call us like that, but
- * find_next_bit() does when offset and size are at the same
- * word and it fails to find a one itself.
- */
- size += 63;
- size >>= 6;
- if (!size)
- return 0;
- asm volatile(
- " repe; scasq\n"
- " jz 1f\n"
- " subq $8,%%rdi\n"
- " bsfq (%%rdi),%%rax\n"
- "1: subq %[addr],%%rdi\n"
- " shlq $3,%%rdi\n"
- " addq %%rdi,%%rax"
- :"=a" (res), "=&c" (d0), "=&D" (d1)
- :"0" (0ULL), "1" (size), "2" (addr),
- [addr] "r" (addr) : "memory");
- return res;
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-long find_first_bit(const unsigned long * addr, unsigned long size)
-{
- return __find_first_bit(addr,size);
-}
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-long find_next_bit(const unsigned long * addr, long size, long offset)
-{
- const unsigned long * p = addr + (offset >> 6);
- unsigned long set = 0, bit = offset & 63, res;
-
- if (bit) {
- /*
- * Look for nonzero in the first 64 bits:
- */
- asm("bsfq %1,%0\n\t"
- "cmoveq %2,%0\n\t"
- : "=r" (set)
- : "r" (*p >> bit), "r" (64L));
- if (set < (64 - bit))
- return set + offset;
- set = 64 - bit;
- p++;
- }
- /*
- * No set bit yet, search remaining full words for a bit
- */
- res = __find_first_bit (p, size - 64 * (p - addr));
- return (offset + set + res);
-}
-
-#include <linux/module.h>
-
-EXPORT_SYMBOL(find_next_bit);
-EXPORT_SYMBOL(find_first_bit);
-EXPORT_SYMBOL(find_first_zero_bit);
-EXPORT_SYMBOL(find_next_zero_bit);
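
The deleted assembly implemented the standard find-next-bit scan: mask off the bits below the starting offset in the first word, then walk whole words until one is non-zero. A portable sketch of the same logic (this assumes 64-bit unsigned long and GCC's __builtin_ctzl; the kernel presumably picks the helper up from the generic lib implementation after this change):

    /* Portable equivalent of the removed find_next_bit(); assumes LP64
     * and, like the original callers did, that offset < size on entry. */
    unsigned long find_next_bit_portable(const unsigned long *addr,
                                         unsigned long size,
                                         unsigned long offset)
    {
            unsigned long word = offset / 64;
            unsigned long bit  = offset % 64;
            unsigned long val  = addr[word] >> bit;  /* drop bits below offset */

            while (offset < size) {
                    if (val)
                            return offset + __builtin_ctzl(val);
                    offset += 64 - bit;              /* advance to the next word */
                    bit = 0;
                    val = addr[++word];
            }
            return size;                             /* nothing set */
    }
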
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 6e2c4efce0e..8acbf0cdf1a 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -113,7 +113,7 @@ static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
for_each_online_cpu(cpu) {
if (cpuset & (1 << cpu)) {
#ifdef VOYAGER_DEBUG
- if (!cpu_isset(cpu, cpu_online_map))
+ if (!cpu_online(cpu))
VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
"cpu_online_map\n",
hard_smp_processor_id(), cpi, cpu));
@@ -683,9 +683,9 @@ void __init smp_boot_cpus(void)
* Code added from smpboot.c */
{
unsigned long bogosum = 0;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_isset(i, cpu_online_map))
- bogosum += cpu_data(i).loops_per_jiffy;
+
+ for_each_online_cpu(i)
+ bogosum += cpu_data(i).loops_per_jiffy;
printk(KERN_INFO "Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
cpucount + 1, bogosum / (500000 / HZ),
@@ -1838,7 +1838,7 @@ static int __cpuinit voyager_cpu_up(unsigned int cpu)
return -EIO;
/* Unleash the CPU! */
cpu_set(cpu, smp_commenced_mask);
- while (!cpu_isset(cpu, cpu_online_map))
+ while (!cpu_online(cpu))
mb();
return 0;
}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index baf7c4f643c..4a476189295 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -566,9 +566,9 @@ void __init paging_init(void)
/*
* Test if the WP bit works in supervisor mode. It isn't supported on 386's
- * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
- * used to involve black magic jumps to work around some nasty CPU bugs,
- * but fortunately the switch to using exceptions got rid of all that.
+ * and also on some strange 486's. All 586+'s are OK. This used to involve
+ * black magic jumps to work around some nasty CPU bugs, but fortunately the
+ * switch to using exceptions got rid of all that.
*/
static void __init test_wp_bit(void)
{
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 0cca6266303..5fbb8652cf5 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -810,7 +810,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
- int nid = phys_to_nid(phys);
+ int nid, next_nid;
#endif
unsigned long pfn = phys >> PAGE_SHIFT;
@@ -829,10 +829,16 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
- reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
+ nid = phys_to_nid(phys);
+ next_nid = phys_to_nid(phys + len - 1);
+ if (nid == next_nid)
+ reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
+ else
+ reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#else
reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif
+
if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
dma_reserve += len / PAGE_SIZE;
set_dma_reserve(dma_reserve);
@@ -926,6 +932,10 @@ const char *arch_vma_name(struct vm_area_struct *vma)
/*
* Initialise the sparsemem vmemmap using huge-pages at the PMD level.
*/
+static long __meminitdata addr_start, addr_end;
+static void __meminitdata *p_start, *p_end;
+static int __meminitdata node_start;
+
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
@@ -960,12 +970,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
PAGE_KERNEL_LARGE);
set_pmd(pmd, __pmd(pte_val(entry)));
- printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
- addr, addr + PMD_SIZE - 1, p, node);
+ /* check to see if we have contiguous blocks */
+ if (p_end != p || node_start != node) {
+ if (p_start)
+ printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+ addr_start, addr_end-1, p_start, p_end-1, node_start);
+ addr_start = addr;
+ node_start = node;
+ p_start = p;
+ }
+ addr_end = addr + PMD_SIZE;
+ p_end = p + PMD_SIZE;
} else {
vmemmap_verify((pte_t *)pmd, node, addr, next);
}
}
return 0;
}
+
+void __meminit vmemmap_populate_print_last(void)
+{
+ if (p_start) {
+ printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+ addr_start, addr_end-1, p_start, p_end-1, node_start);
+ p_start = NULL;
+ p_end = NULL;
+ node_start = 0;
+ }
+}
#endif
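
The vmemmap change above batches the per-PMD debug output: it extends an [addr_start, addr_end) window while each new mapping starts exactly at p_end on the same node, and prints one summary line whenever continuity breaks (plus a final flush from vmemmap_populate_print_last()). The same coalescing pattern in miniature, with made-up ranges:

    #include <stdio.h>

    /* Print one line per run of contiguous ranges instead of one per range. */
    int main(void)
    {
            unsigned long starts[] = { 0x100, 0x200, 0x300, 0x900 };
            unsigned long len = 0x100, run_start = 0, run_end = 0;

            for (unsigned i = 0; i < 4; i++) {
                    if (starts[i] != run_end) {       /* continuity broke */
                            if (run_end)
                                    printf("[%lx-%lx]\n", run_start, run_end - 1);
                            run_start = starts[i];
                    }
                    run_end = starts[i] + len;
            }
            if (run_end)                              /* final flush */
                    printf("[%lx-%lx]\n", run_start, run_end - 1);
            return 0;
    }
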
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 9a6892200b2..c5066d519e5 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -196,6 +196,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
unsigned long bootmap_start, nodedata_phys;
void *bootmap;
const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
+ int nid;
start = round_up(start, ZONE_ALIGN);
@@ -218,9 +219,19 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
NODE_DATA(nodeid)->node_start_pfn = start_pfn;
NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
- /* Find a place for the bootmem map */
+ /*
+ * Find a place for the bootmem map.
+ * nodedata_phys may land on another node via alloc_bootmem,
+ * so make sure bootmap_start is not too small; otherwise
+ * early_node_mem will grab it with find_e820_area instead of
+ * alloc_bootmem, which could clash with a reserved range.
+ */
bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
- bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
+ nid = phys_to_nid(nodedata_phys);
+ if (nid == nodeid)
+ bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
+ else
+ bootmap_start = round_up(start, PAGE_SIZE);
/*
* SMP_CACHE_BYTES alignment could be enough, but
* init_bootmem_node prefers PAGE_SIZE alignment
@@ -245,10 +256,29 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
free_bootmem_with_active_regions(nodeid, end);
- reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size,
- BOOTMEM_DEFAULT);
- reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
- bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
+ /*
+ * Convert early reservations to bootmem reservations now,
+ * otherwise early_node_mem could hand out memory that was
+ * early-reserved on a previous node.
+ */
+ early_res_to_bootmem(start, end);
+
+ /*
+ * In some cases early_node_mem uses alloc_bootmem to get a
+ * range on another node; don't reserve that range again.
+ */
+ if (nid != nodeid)
+ printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
+ else
+ reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys,
+ pgdat_size, BOOTMEM_DEFAULT);
+ nid = phys_to_nid(bootmap_start);
+ if (nid != nodeid)
+ printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid);
+ else
+ reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
+ bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
+
#ifdef CONFIG_ACPI_NUMA
srat_reserve_add_area(nodeid);
#endif
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index ef8b64b89c7..e7ca7fc48d1 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -16,6 +16,7 @@
#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
+#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
@@ -334,7 +335,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
break;
}
- printk("Overlap at 0x%Lx-0x%Lx\n",
+ pr_debug("Overlap at 0x%Lx-0x%Lx\n",
saved_ptr->start, saved_ptr->end);
/* No conflict. Go ahead and add this new entry */
list_add(&new_entry->nd, saved_ptr->nd.prev);
@@ -477,6 +478,33 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
return vma_prot;
}
+#ifdef CONFIG_NONPROMISC_DEVMEM
+/* This check is done in drivers/char/mem.c in the NONPROMISC_DEVMEM case. */
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ u64 from = ((u64)pfn) << PAGE_SHIFT;
+ u64 to = from + size;
+ u64 cursor = from;
+
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn)) {
+ printk(KERN_INFO
+ "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+ current->comm, from, to);
+ return 0;
+ }
+ cursor += PAGE_SIZE;
+ pfn++;
+ }
+ return 1;
+}
+#endif /* CONFIG_NONPROMISC_DEVMEM */
+
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot)
{
@@ -485,6 +513,9 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long ret_flags;
int retval;
+ if (!range_is_allowed(pfn, size))
+ return 0;
+
if (file->f_flags & O_SYNC) {
flags = _PAGE_CACHE_UC;
}
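range_is_allowed() walks the request a page at a time because devmem_is_allowed() is defined per pfn, and it rejects the whole mapping on the first forbidden page. A condensed sketch of the same walk (hypothetical name, same calls as in the hunk):

static int pfn_range_allowed(unsigned long pfn, unsigned long size)
{
	u64 cursor = (u64)pfn << PAGE_SHIFT;
	u64 end = cursor + size;

	/* reject the whole request if any single page is forbidden */
	for (; cursor < end; cursor += PAGE_SIZE, pfn++)
		if (!devmem_is_allowed(pfn))
			return 0;
	return 1;
}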
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 92dd3dbf3ff..94e69000f98 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -193,7 +193,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
/* Restrict the possible_map according to max_cpus. */
while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
- for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
+ for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
continue;
cpu_clear(cpu, cpu_possible_map);
}
diff --git a/block/bsg.c b/block/bsg.c
index f51172ed27c..23ea4fd1a66 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -699,14 +699,26 @@ static struct bsg_device *bsg_alloc_device(void)
return bd;
}
+static void bsg_kref_release_function(struct kref *kref)
+{
+ struct bsg_class_device *bcd =
+ container_of(kref, struct bsg_class_device, ref);
+
+ if (bcd->release)
+ bcd->release(bcd->parent);
+
+ put_device(bcd->parent);
+}
+
static int bsg_put_device(struct bsg_device *bd)
{
- int ret = 0;
- struct device *dev = bd->queue->bsg_dev.dev;
+ int ret = 0, do_free;
+ struct request_queue *q = bd->queue;
mutex_lock(&bsg_mutex);
- if (!atomic_dec_and_test(&bd->ref_count))
+ do_free = atomic_dec_and_test(&bd->ref_count);
+ if (!do_free)
goto out;
dprintk("%s: tearing down\n", bd->name);
@@ -723,12 +735,13 @@ static int bsg_put_device(struct bsg_device *bd)
*/
ret = bsg_complete_all_commands(bd);
- blk_put_queue(bd->queue);
hlist_del(&bd->dev_list);
kfree(bd);
out:
mutex_unlock(&bsg_mutex);
- put_device(dev);
+ kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
+ if (do_free)
+ blk_put_queue(q);
return ret;
}
@@ -796,7 +809,7 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
mutex_lock(&bsg_mutex);
bcd = idr_find(&bsg_minor_idr, iminor(inode));
if (bcd)
- get_device(bcd->dev);
+ kref_get(&bcd->ref);
mutex_unlock(&bsg_mutex);
if (!bcd)
@@ -808,7 +821,7 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
bd = bsg_add_device(inode, bcd->queue, file);
if (IS_ERR(bd))
- put_device(bcd->dev);
+ kref_put(&bcd->ref, bsg_kref_release_function);
return bd;
}
@@ -947,14 +960,14 @@ void bsg_unregister_queue(struct request_queue *q)
idr_remove(&bsg_minor_idr, bcd->minor);
sysfs_remove_link(&q->kobj, "bsg");
device_unregister(bcd->class_dev);
- put_device(bcd->dev);
bcd->class_dev = NULL;
+ kref_put(&bcd->ref, bsg_kref_release_function);
mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
-int bsg_register_queue(struct request_queue *q, struct device *gdev,
- const char *name)
+int bsg_register_queue(struct request_queue *q, struct device *parent,
+ const char *name, void (*release)(struct device *))
{
struct bsg_class_device *bcd;
dev_t dev;
@@ -965,7 +978,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
if (name)
devname = name;
else
- devname = gdev->bus_id;
+ devname = parent->bus_id;
/*
* we need a proper transport to send commands, not a stacked device
@@ -996,9 +1009,11 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
bcd->minor = minor;
bcd->queue = q;
- bcd->dev = get_device(gdev);
+ bcd->parent = get_device(parent);
+ bcd->release = release;
+ kref_init(&bcd->ref);
dev = MKDEV(bsg_major, bcd->minor);
- class_dev = device_create(bsg_class, gdev, dev, "%s", devname);
+ class_dev = device_create(bsg_class, parent, dev, "%s", devname);
if (IS_ERR(class_dev)) {
ret = PTR_ERR(class_dev);
goto put_dev;
@@ -1017,7 +1032,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
unregister_class_dev:
device_unregister(class_dev);
put_dev:
- put_device(gdev);
+ put_device(parent);
remove_idr:
idr_remove(&bsg_minor_idr, minor);
unlock:
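The bsg changes replace raw get_device()/put_device() pairs with a kref whose release callback runs the transport's release hook before dropping the parent device; note too that blk_put_queue() is deferred until after the final kref_put(), since bsg_kref_release_function() reaches state hanging off the queue. The generic shape of the conversion, as a sketch with a hypothetical struct:

struct obj {
	struct kref ref;
	/* ... payload ... */
};

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, ref);

	/* run any owner-supplied release hook here, as bsg does */
	kfree(o);
}

	kref_init(&o->ref);		/* registration: count = 1 */
	kref_get(&o->ref);		/* each opener takes a reference */
	kref_put(&o->ref, obj_release);	/* last put runs obj_release() */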
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 788da9781f8..0d90ff5fd11 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -418,13 +418,12 @@ static void acpi_processor_idle(void)
cx = pr->power.state;
if (!cx || acpi_idle_suspend) {
- if (pm_idle_save)
- pm_idle_save();
- else
+ if (pm_idle_save) {
+ pm_idle_save(); /* enables IRQs */
+ } else {
acpi_safe_halt();
-
- if (irqs_disabled())
local_irq_enable();
+ }
return;
}
@@ -520,10 +519,12 @@ static void acpi_processor_idle(void)
* Use the appropriate idle routine, the one that would
* be used without acpi C-states.
*/
- if (pm_idle_save)
- pm_idle_save();
- else
+ if (pm_idle_save) {
+ pm_idle_save(); /* enables IRQs */
+ } else {
acpi_safe_halt();
+ local_irq_enable();
+ }
/*
* TBD: Can't get time duration while in C1, as resumes
@@ -534,8 +535,6 @@ static void acpi_processor_idle(void)
* skew otherwise.
*/
sleep_ticks = 0xFFFFFFFF;
- if (irqs_disabled())
- local_irq_enable();
break;
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index d2866999214..96bdb9296b0 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -436,8 +436,9 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
system controller may experience noise due to strong drive strengths
*/
if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
- u8 cap_ptr=0;
struct pci_dev *gfxcard=NULL;
+
+ cap_ptr = 0;
while (!cap_ptr) {
gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
if (!gfxcard) {
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 55d7a82bd07..857b26227d8 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -967,7 +967,7 @@ int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
return 0;
}
-static int agp_ioctl(struct inode *inode, struct file *file,
+static long agp_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct agp_file_private *curr_priv = file->private_data;
@@ -1058,7 +1058,7 @@ static const struct file_operations agp_fops =
.llseek = no_llseek,
.read = agp_read,
.write = agp_write,
- .ioctl = agp_ioctl,
+ .unlocked_ioctl = agp_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_agp_ioctl,
#endif
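The agp change is the stock .ioctl to .unlocked_ioctl conversion: the inode argument and the implicit big kernel lock both go away, so the handler returns long and must provide its own serialization. The two signatures side by side (names hypothetical):

/* old hook, called under the BKL */
static int foo_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg);

/* new hook: no inode, no BKL -- the driver locks for itself */
static long foo_unlocked_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg);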
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index 141f4dfa0a1..b710426bab3 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -167,13 +167,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
page_base += ATI_PCIGART_PAGE_SIZE;
}
}
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
- dma_sync_single_for_device(&dev->pdev->dev,
- bus_address,
- max_pages * sizeof(u32),
- PCI_DMA_TODEVICE);
-
ret = 1;
#if defined(__i386__) || defined(__x86_64__)
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 3a05c6d5ebe..6874f31ca8c 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -471,6 +471,7 @@ struct drm_irq_busid {
enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
@@ -503,6 +504,21 @@ union drm_wait_vblank {
struct drm_wait_vblank_reply reply;
};
+enum drm_modeset_ctl_cmd {
+ _DRM_PRE_MODESET = 1,
+ _DRM_POST_MODESET = 2,
+};
+
+/**
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+struct drm_modeset_ctl {
+ unsigned long arg;
+ enum drm_modeset_ctl_cmd cmd;
+};
+
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
@@ -587,6 +603,7 @@ struct drm_set_version {
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
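Together with the drm_modeset_ctl() handler added in drm_irq.c below, the intent is that userspace brackets every mode switch with the two commands so lost vblanks are folded back into the counter. A hypothetical userspace sketch:

struct drm_modeset_ctl ctl = {
	.arg = 0,			/* CRTC index */
	.cmd = _DRM_PRE_MODESET,
};

ioctl(drm_fd, DRM_IOCTL_MODESET_CTL, &ctl);	/* snapshot the counter */
/* ... program the new mode; vblank interrupts may be lost here ... */
ctl.cmd = _DRM_POST_MODESET;
ioctl(drm_fd, DRM_IOCTL_MODESET_CTL, &ctl);	/* account for the gap */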
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 6540948d517..ecee3547a13 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -100,10 +100,8 @@ struct drm_device;
#define DRIVER_HAVE_DMA 0x20
#define DRIVER_HAVE_IRQ 0x40
#define DRIVER_IRQ_SHARED 0x80
-#define DRIVER_IRQ_VBL 0x100
#define DRIVER_DMA_QUEUE 0x200
#define DRIVER_FB_DMA 0x400
-#define DRIVER_IRQ_VBL2 0x800
/***********************************************************************/
/** \name Begin the DRM... */
@@ -379,13 +377,12 @@ struct drm_buf_entry {
struct drm_file {
int authenticated;
int master;
- int minor;
pid_t pid;
uid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct list_head lhead;
- struct drm_head *head;
+ struct drm_minor *minor;
int remove_auth_on_close;
unsigned long lock_count;
struct file *filp;
@@ -580,10 +577,52 @@ struct drm_driver {
int (*context_dtor) (struct drm_device *dev, int context);
int (*kernel_context_switch) (struct drm_device *dev, int old,
int new);
- void (*kernel_context_switch_unlock) (struct drm_device *dev);
- int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
- int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
- int (*dri_library_name) (struct drm_device *dev, char *buf);
+ void (*kernel_context_switch_unlock) (struct drm_device * dev);
+ /**
+ * get_vblank_counter - get raw hardware vblank counter
+ * @dev: DRM device
+ * @crtc: counter to fetch
+ *
+ * Driver callback for fetching a raw hardware vblank counter
+ * for @crtc. If a device doesn't have a hardware counter, the
+ * driver can simply return the value of drm_vblank_count and
+ * make the enable_vblank() and disable_vblank() hooks into no-ops,
+ * leaving interrupts enabled at all times.
+ *
+ * Wraparound handling and loss of events due to modesetting is dealt
+ * with in the DRM core code.
+ *
+ * RETURNS
+ * Raw vblank counter value.
+ */
+ u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
+
+ /**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+ int (*enable_vblank) (struct drm_device *dev, int crtc);
+
+ /**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Disable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+ void (*disable_vblank) (struct drm_device *dev, int crtc);
+ int (*dri_library_name) (struct drm_device *dev, char * buf);
/**
* Called by \c drm_device_is_agp. Typically used to determine if a
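Per the callback documentation above, hardware without a vblank counter register can satisfy all three hooks trivially. A hypothetical driver sketch:

static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
{
	/* no hardware register: report the software count */
	return drm_vblank_count(dev, crtc);
}

static int foo_enable_vblank(struct drm_device *dev, int crtc)
{
	return 0;	/* no-op: interrupts stay on to keep the count */
}

static void foo_disable_vblank(struct drm_device *dev, int crtc)
{
	/* no-op for the same reason */
}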
@@ -602,7 +641,7 @@ struct drm_driver {
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
void (*irq_preinstall) (struct drm_device *dev);
- void (*irq_postinstall) (struct drm_device *dev);
+ int (*irq_postinstall) (struct drm_device *dev);
void (*irq_uninstall) (struct drm_device *dev);
void (*reclaim_buffers) (struct drm_device *dev,
struct drm_file * file_priv);
@@ -630,16 +669,19 @@ struct drm_driver {
struct pci_driver pci_driver;
};
+#define DRM_MINOR_UNASSIGNED 0
+#define DRM_MINOR_LEGACY 1
+
/**
- * DRM head structure. This structure represent a video head on a card
- * that may contain multiple heads. Embed one per head of these in the
- * private drm_device structure.
+ * DRM minor structure. This structure represents a drm minor number.
*/
-struct drm_head {
- int minor; /**< Minor device number */
+struct drm_minor {
+ int index; /**< Minor device number */
+ int type; /**< Control or render */
+ dev_t device; /**< Device number for mknod */
+ struct device kdev; /**< Linux device */
struct drm_device *dev;
struct proc_dir_entry *dev_root; /**< proc directory entry */
- dev_t device; /**< Device number for mknod */
};
/**
@@ -647,7 +689,6 @@ struct drm_head {
* may contain multiple heads.
*/
struct drm_device {
- struct device dev; /**< Linux device */
char *unique; /**< Unique identifier: e.g., busid */
int unique_len; /**< Length of unique field */
char *devname; /**< For /proc/interrupts */
@@ -729,13 +770,21 @@ struct drm_device {
/** \name VBLANK IRQ support */
/*@{ */
- wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
- atomic_t vbl_received;
- atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
+ wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
+ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
spinlock_t vbl_lock;
- struct list_head vbl_sigs; /**< signal list to send on VBLANK */
- struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
- unsigned int vbl_pending;
+ struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
+ atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/
+ atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
+ u32 *last_vblank; /* protected by dev->vbl_lock, used */
+ /* for wraparound handling */
+ u32 *vblank_offset; /* vblanks lost during modeset, added */
+ /* back in by drm_vblank_count() */
+ int *vblank_enabled; /* so we don't call enable more than
+ once per disable */
+ u32 *vblank_premodeset; /* counter value saved at _DRM_PRE_MODESET */
+ struct timer_list vblank_disable_timer;
+
+ unsigned long max_vblank_count; /**< size of vblank counter register */
spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
void (*locked_tasklet_func)(struct drm_device *dev);
@@ -755,6 +804,7 @@ struct drm_device {
#ifdef __alpha__
struct pci_controller *hose;
#endif
+ int num_crtcs; /**< Number of CRTCs on this device */
struct drm_sg_mem *sg; /**< Scatter gather memory */
void *dev_private; /**< device private data */
struct drm_sigdata sigdata; /**< For block_all_signals */
@@ -763,7 +813,7 @@ struct drm_device {
struct drm_driver *driver;
drm_local_map_t *agp_buffer_map;
unsigned int agp_buffer_token;
- struct drm_head primary; /**< primary screen head */
+ struct drm_minor *primary; /**< render type primary screen head */
/** \name Drawable information */
/*@{ */
@@ -989,11 +1039,19 @@ extern void drm_driver_irq_preinstall(struct drm_device *dev);
extern void drm_driver_irq_postinstall(struct drm_device *dev);
extern void drm_driver_irq_uninstall(struct drm_device *dev);
-extern int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
-extern void drm_vbl_send_signals(struct drm_device *dev);
+extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp);
+extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
+extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern void drm_update_vblank_count(struct drm_device *dev, int crtc);
+extern void drm_handle_vblank(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, int crtc);
+extern void drm_vblank_put(struct drm_device *dev, int crtc);
+
+ /* Modesetting support */
+extern int drm_modeset_ctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* AGP/GART support (drm_agpsupport.h) */
extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
@@ -1030,23 +1088,20 @@ extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
extern int drm_put_dev(struct drm_device *dev);
-extern int drm_put_head(struct drm_head *head);
+extern int drm_put_minor(struct drm_minor **minor);
extern unsigned int drm_debug;
-extern unsigned int drm_cards_limit;
-extern struct drm_head **drm_heads;
+
extern struct class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
+extern struct idr drm_minors_idr;
+
extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
/* Proc support (drm_proc.h) */
-extern int drm_proc_init(struct drm_device *dev,
- int minor,
- struct proc_dir_entry *root,
- struct proc_dir_entry **dev_root);
-extern int drm_proc_cleanup(int minor,
- struct proc_dir_entry *root,
- struct proc_dir_entry *dev_root);
+extern int drm_proc_init(struct drm_minor *minor, int minor_id,
+ struct proc_dir_entry *root);
+extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
/* Scatter Gather Support (drm_scatter.h) */
extern void drm_sg_cleanup(struct drm_sg_mem * entry);
@@ -1071,8 +1126,8 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
extern void drm_sysfs_destroy(void);
-extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head);
-extern void drm_sysfs_device_remove(struct drm_device *dev);
+extern int drm_sysfs_device_add(struct drm_minor *minor);
+extern void drm_sysfs_device_remove(struct drm_minor *minor);
/*
* Basic memory manager support (drm_mm.c)
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 9468c7889ff..aefa5ac4c0b 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -122,7 +122,7 @@ EXPORT_SYMBOL(drm_agp_acquire);
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- return drm_agp_acquire((struct drm_device *) file_priv->head->dev);
+ return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
}
/**
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 0e7af53c87d..fc54140551a 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -313,35 +313,36 @@ static void drm_cleanup(struct drm_device * dev)
drm_ht_remove(&dev->map_hash);
drm_ctxbitmap_cleanup(dev);
- drm_put_head(&dev->primary);
+ drm_put_minor(&dev->primary);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
}
-void drm_exit(struct drm_driver *driver)
+int drm_minors_cleanup(int id, void *ptr, void *data)
{
- int i;
- struct drm_device *dev = NULL;
- struct drm_head *head;
+ struct drm_minor *minor = ptr;
+ struct drm_device *dev;
+ struct drm_driver *driver = data;
+
+ dev = minor->dev;
+ if (minor->dev->driver != driver)
+ return 0;
+
+ if (minor->type != DRM_MINOR_LEGACY)
+ return 0;
+ if (dev)
+ pci_dev_put(dev->pdev);
+ drm_cleanup(dev);
+ return 1;
+}
+
+void drm_exit(struct drm_driver *driver)
+{
DRM_DEBUG("\n");
- for (i = 0; i < drm_cards_limit; i++) {
- head = drm_heads[i];
- if (!head)
- continue;
- if (!head->dev)
- continue;
- if (head->dev->driver != driver)
- continue;
- dev = head->dev;
- if (dev) {
- /* release the pci driver */
- if (dev->pdev)
- pci_dev_put(dev->pdev);
- drm_cleanup(dev);
- }
- }
+ idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
+
DRM_INFO("Module unloaded\n");
}
@@ -357,13 +358,7 @@ static int __init drm_core_init(void)
{
int ret = -ENOMEM;
- drm_cards_limit =
- (drm_cards_limit <
- DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
- drm_heads =
- drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
- if (!drm_heads)
- goto err_p1;
+ idr_init(&drm_minors_idr);
if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
goto err_p1;
@@ -391,7 +386,8 @@ err_p3:
drm_sysfs_destroy();
err_p2:
unregister_chrdev(DRM_MAJOR, "drm");
- drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
+
+ idr_destroy(&drm_minors_idr);
err_p1:
return ret;
}
@@ -403,7 +399,7 @@ static void __exit drm_core_exit(void)
unregister_chrdev(DRM_MAJOR, "drm");
- drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
+ idr_destroy(&drm_minors_idr);
}
module_init(drm_core_init);
@@ -452,7 +448,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->head->dev;
+ struct drm_device *dev = file_priv->minor->dev;
struct drm_ioctl_desc *ioctl;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -465,7 +461,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
task_pid_nr(current), cmd, nr,
- (long)old_encode_dev(file_priv->head->device),
+ (long)old_encode_dev(file_priv->minor->device),
file_priv->authenticated);
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index f09d4b5002b..68f0da801ed 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -129,16 +129,15 @@ static int drm_setup(struct drm_device * dev)
int drm_open(struct inode *inode, struct file *filp)
{
struct drm_device *dev = NULL;
- int minor = iminor(inode);
+ int minor_id = iminor(inode);
+ struct drm_minor *minor;
int retcode = 0;
- if (!((minor >= 0) && (minor < drm_cards_limit)))
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
return -ENODEV;
- if (!drm_heads[minor])
- return -ENODEV;
-
- if (!(dev = drm_heads[minor]->dev))
+ if (!(dev = minor->dev))
return -ENODEV;
retcode = drm_open_helper(inode, filp, dev);
@@ -168,19 +167,18 @@ EXPORT_SYMBOL(drm_open);
int drm_stub_open(struct inode *inode, struct file *filp)
{
struct drm_device *dev = NULL;
- int minor = iminor(inode);
+ struct drm_minor *minor;
+ int minor_id = iminor(inode);
int err = -ENODEV;
const struct file_operations *old_fops;
DRM_DEBUG("\n");
- if (!((minor >= 0) && (minor < drm_cards_limit)))
- return -ENODEV;
-
- if (!drm_heads[minor])
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
return -ENODEV;
- if (!(dev = drm_heads[minor]->dev))
+ if (!(dev = minor->dev))
return -ENODEV;
old_fops = filp->f_op;
@@ -225,7 +223,7 @@ static int drm_cpu_valid(void)
static int drm_open_helper(struct inode *inode, struct file *filp,
struct drm_device * dev)
{
- int minor = iminor(inode);
+ int minor_id = iminor(inode);
struct drm_file *priv;
int ret;
@@ -234,7 +232,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
if (!drm_cpu_valid())
return -EINVAL;
- DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor);
+ DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
if (!priv)
@@ -245,8 +243,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->filp = filp;
priv->uid = current->euid;
priv->pid = task_pid_nr(current);
- priv->minor = minor;
- priv->head = drm_heads[minor];
+ priv->minor = idr_find(&drm_minors_idr, minor_id);
priv->ioctl_count = 0;
/* for compatibility root is always authenticated */
priv->authenticated = capable(CAP_SYS_ADMIN);
@@ -297,11 +294,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
int drm_fasync(int fd, struct file *filp, int on)
{
struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->head->dev;
+ struct drm_device *dev = priv->minor->dev;
int retcode;
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
- (long)old_encode_dev(priv->head->device));
+ (long)old_encode_dev(priv->minor->device));
retcode = fasync_helper(fd, filp, on, &dev->buf_async);
if (retcode < 0)
return retcode;
@@ -324,7 +321,7 @@ EXPORT_SYMBOL(drm_fasync);
int drm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->head->dev;
+ struct drm_device *dev = file_priv->minor->dev;
int retcode = 0;
unsigned long irqflags;
@@ -341,14 +338,14 @@ int drm_release(struct inode *inode, struct file *filp)
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
task_pid_nr(current),
- (long)old_encode_dev(file_priv->head->device),
+ (long)old_encode_dev(file_priv->minor->device),
dev->open_count);
if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
if (drm_i_have_hw_lock(dev, file_priv)) {
dev->driver->reclaim_buffers_locked(dev, file_priv);
} else {
- unsigned long _end=jiffies + 3*DRM_HZ;
+ unsigned long endtime = jiffies + 3 * DRM_HZ;
int locked = 0;
drm_idlelock_take(&dev->lock);
@@ -366,7 +363,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (locked)
break;
schedule();
- } while (!time_after_eq(jiffies, _end));
+ } while (!time_after_eq(jiffies, endtime));
if (!locked) {
DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 089c015c01d..286f9d61e7d 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -71,6 +71,117 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return 0;
}
+static void vblank_disable_fn(unsigned long arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ unsigned long irqflags;
+ int i;
+
+ for (i = 0; i < dev->num_crtcs; i++) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+ dev->vblank_enabled[i]) {
+ dev->driver->disable_vblank(dev, i);
+ dev->vblank_enabled[i] = 0;
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ }
+}
+
+static void drm_vblank_cleanup(struct drm_device *dev)
+{
+ /* Bail if the driver didn't call drm_vblank_init() */
+ if (dev->num_crtcs == 0)
+ return;
+
+ del_timer(&dev->vblank_disable_timer);
+
+ vblank_disable_fn((unsigned long)dev);
+
+ drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
+ DRM_MEM_DRIVER);
+ drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
+ DRM_MEM_DRIVER);
+ drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
+ dev->num_crtcs, DRM_MEM_DRIVER);
+ drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
+ dev->num_crtcs, DRM_MEM_DRIVER);
+ drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
+ dev->num_crtcs, DRM_MEM_DRIVER);
+ drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
+ DRM_MEM_DRIVER);
+ drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
+ dev->num_crtcs, DRM_MEM_DRIVER);
+ drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
+ DRM_MEM_DRIVER);
+
+ dev->num_crtcs = 0;
+}
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+ int i, ret = -ENOMEM;
+
+ setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+ (unsigned long)dev);
+ spin_lock_init(&dev->vbl_lock);
+ atomic_set(&dev->vbl_signal_pending, 0);
+ dev->num_crtcs = num_crtcs;
+
+ dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->vbl_queue)
+ goto err;
+
+ dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->vbl_sigs)
+ goto err;
+
+ dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->_vblank_count)
+ goto err;
+
+ dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->vblank_refcount)
+ goto err;
+
+ dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
+ DRM_MEM_DRIVER);
+ if (!dev->vblank_enabled)
+ goto err;
+
+ dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+ if (!dev->last_vblank)
+ goto err;
+
+ dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
+ DRM_MEM_DRIVER);
+ if (!dev->vblank_premodeset)
+ goto err;
+
+ dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+ if (!dev->vblank_offset)
+ goto err;
+
+ /* Zero per-crtc vblank stuff */
+ for (i = 0; i < num_crtcs; i++) {
+ init_waitqueue_head(&dev->vbl_queue[i]);
+ INIT_LIST_HEAD(&dev->vbl_sigs[i]);
+ atomic_set(&dev->_vblank_count[i], 0);
+ atomic_set(&dev->vblank_refcount[i], 0);
+ }
+
+ return 0;
+
+err:
+ drm_vblank_cleanup(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
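A driver adopting the new core support is expected to size the per-CRTC state at load time and report each vblank from its interrupt handler. A hypothetical sketch (foo_read_irq_status and the FOO_VBLANK_* bits are invented):

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
	/* allocate per-CRTC vblank state before enabling the IRQ */
	return drm_vblank_init(dev, 2);		/* two CRTCs */
}

static irqreturn_t foo_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	u32 status = foo_read_irq_status(dev);

	if (status & FOO_VBLANK_PIPE_A)
		drm_handle_vblank(dev, 0);
	if (status & FOO_VBLANK_PIPE_B)
		drm_handle_vblank(dev, 1);

	return IRQ_HANDLED;
}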
+
/**
* Install IRQ handler.
*
@@ -109,17 +220,6 @@ static int drm_irq_install(struct drm_device * dev)
DRM_DEBUG("irq=%d\n", dev->irq);
- if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
- init_waitqueue_head(&dev->vbl_queue);
-
- spin_lock_init(&dev->vbl_lock);
-
- INIT_LIST_HEAD(&dev->vbl_sigs);
- INIT_LIST_HEAD(&dev->vbl_sigs2);
-
- dev->vbl_pending = 0;
- }
-
/* Before installing handler */
dev->driver->irq_preinstall(dev);
@@ -137,9 +237,14 @@ static int drm_irq_install(struct drm_device * dev)
}
/* After installing handler */
- dev->driver->irq_postinstall(dev);
+ ret = dev->driver->irq_postinstall(dev);
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ }
- return 0;
+ return ret;
}
/**
@@ -170,6 +275,8 @@ int drm_irq_uninstall(struct drm_device * dev)
free_irq(dev->irq, dev);
+ drm_vblank_cleanup(dev);
+
dev->locked_tasklet_func = NULL;
return 0;
@@ -214,6 +321,148 @@ int drm_control(struct drm_device *dev, void *data,
}
/**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+ return atomic_read(&dev->_vblank_count[crtc]) +
+ dev->vblank_offset[crtc];
+}
+EXPORT_SYMBOL(drm_vblank_count);
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc). Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ */
+void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ u32 cur_vblank, diff;
+
+ /*
+ * Interrupts were disabled prior to this call, so deal with counter
+ * wrap if needed.
+ * NOTE! It's possible we lost a full dev->max_vblank_count events
+ * here if the register is small or we had vblank interrupts off for
+ * a long time.
+ */
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ if (cur_vblank < dev->last_vblank[crtc]) {
+ diff = dev->max_vblank_count -
+ dev->last_vblank[crtc];
+ diff += cur_vblank;
+ } else {
+ diff = cur_vblank - dev->last_vblank[crtc];
+ }
+ dev->last_vblank[crtc] = cur_vblank;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ atomic_add(diff, &dev->_vblank_count[crtc]);
+}
+EXPORT_SYMBOL(drm_update_vblank_count);
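A worked case of the wrap handling, assuming max_vblank_count is the register's wrap period:

/*
 * 24-bit counter, max_vblank_count = 0x1000000.
 * last_vblank = 0xfffffe, the register wraps, cur_vblank = 0x000001:
 *   diff = (0x1000000 - 0xfffffe) + 0x1 = 3
 * i.e. three vblanks elapsed, exactly what a non-wrapping counter
 * would have reported.
 */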
+
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use. Note callers will probably want to update the master counter
+ * using drm_update_vblank_count() above before calling this routine so that
+ * wakeups occur on the right vblank event.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ /* Going from 0->1 means we have to enable interrupts again */
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
+ !dev->vblank_enabled[crtc]) {
+ ret = dev->driver->enable_vblank(dev, crtc);
+ if (ret)
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ else
+ dev->vblank_enabled[crtc] = 1;
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_vblank_get);
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+ /* Last user schedules interrupt disable */
+ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
+ mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+}
+EXPORT_SYMBOL(drm_vblank_put);
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_modeset_ctl *modeset = data;
+ int crtc, ret = 0;
+ u32 new;
+
+ crtc = modeset->arg;
+ if (crtc >= dev->num_crtcs) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (modeset->cmd) {
+ case _DRM_PRE_MODESET:
+ dev->vblank_premodeset[crtc] =
+ dev->driver->get_vblank_counter(dev, crtc);
+ break;
+ case _DRM_POST_MODESET:
+ new = dev->driver->get_vblank_counter(dev, crtc);
+ dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ return ret;
+}
+
+/**
* Wait for VBLANK.
*
* \param inode device inode.
@@ -232,12 +481,13 @@ int drm_control(struct drm_device *dev, void *data,
*
* If a signal is not requested, then calls vblank_wait().
*/
-int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
struct timeval now;
int ret = 0;
- unsigned int flags, seq;
+ unsigned int flags, seq, crtc;
if ((!dev->irq) || (!dev->irq_enabled))
return -EINVAL;
@@ -251,13 +501,13 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
- if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
- DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
+ if (crtc >= dev->num_crtcs)
return -EINVAL;
- seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
- : &dev->vbl_received);
+ drm_update_vblank_count(dev, crtc);
+ seq = drm_vblank_count(dev, crtc);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
@@ -276,8 +526,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
- struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
- ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+ struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
struct drm_vbl_sig *vbl_sig;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -298,22 +547,26 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
}
}
- if (dev->vbl_pending >= 100) {
+ if (atomic_read(&dev->vbl_signal_pending) >= 100) {
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
return -EBUSY;
}
- dev->vbl_pending++;
-
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- if (!
- (vbl_sig =
- drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
+ vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
+ DRM_MEM_DRIVER);
+ if (!vbl_sig)
return -ENOMEM;
+
+ ret = drm_vblank_get(dev, crtc);
+ if (ret) {
+ drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
+ DRM_MEM_DRIVER);
+ return ret;
}
- memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
+ atomic_inc(&dev->vbl_signal_pending);
vbl_sig->sequence = vblwait->request.sequence;
vbl_sig->info.si_signo = vblwait->request.signal;
@@ -327,17 +580,20 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
vblwait->reply.sequence = seq;
} else {
- if (flags & _DRM_VBLANK_SECONDARY) {
- if (dev->driver->vblank_wait2)
- ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
- } else if (dev->driver->vblank_wait)
- ret =
- dev->driver->vblank_wait(dev,
- &vblwait->request.sequence);
-
+ unsigned long cur_vblank;
+
+ ret = drm_vblank_get(dev, crtc);
+ if (ret)
+ return ret;
+ DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+ (((cur_vblank = drm_vblank_count(dev, crtc))
+ - vblwait->request.sequence) <= (1 << 23)));
+ drm_vblank_put(dev, crtc);
do_gettimeofday(&now);
+
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
+ vblwait->reply.sequence = cur_vblank;
}
done:
@@ -348,44 +604,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
* Send the VBLANK signals.
*
* \param dev DRM device.
+ * \param crtc CRTC where the vblank event occurred
*
* Sends a signal for each task in drm_device::vbl_sigs and empties the list.
*
* If a signal is not requested, then calls vblank_wait().
*/
-void drm_vbl_send_signals(struct drm_device * dev)
+static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
{
+ struct drm_vbl_sig *vbl_sig, *tmp;
+ struct list_head *vbl_sigs;
+ unsigned int vbl_seq;
unsigned long flags;
- int i;
spin_lock_irqsave(&dev->vbl_lock, flags);
- for (i = 0; i < 2; i++) {
- struct drm_vbl_sig *vbl_sig, *tmp;
- struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
- unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
- &dev->vbl_received);
+ vbl_sigs = &dev->vbl_sigs[crtc];
+ vbl_seq = drm_vblank_count(dev, crtc);
- list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
- if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
- vbl_sig->info.si_code = vbl_seq;
- send_sig_info(vbl_sig->info.si_signo,
- &vbl_sig->info, vbl_sig->task);
+ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
+ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+ vbl_sig->info.si_code = vbl_seq;
+ send_sig_info(vbl_sig->info.si_signo,
+ &vbl_sig->info, vbl_sig->task);
- list_del(&vbl_sig->head);
+ list_del(&vbl_sig->head);
- drm_free(vbl_sig, sizeof(*vbl_sig),
- DRM_MEM_DRIVER);
-
- dev->vbl_pending--;
- }
- }
+ drm_free(vbl_sig, sizeof(*vbl_sig),
+ DRM_MEM_DRIVER);
+ atomic_dec(&dev->vbl_signal_pending);
+ drm_vblank_put(dev, crtc);
+ }
}
spin_unlock_irqrestore(&dev->vbl_lock, flags);
}
-EXPORT_SYMBOL(drm_vbl_send_signals);
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+void drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+ drm_update_vblank_count(dev, crtc);
+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ drm_vbl_send_signals(dev, crtc);
+}
+EXPORT_SYMBOL(drm_handle_vblank);
/**
* Tasklet wrapper function.
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index d9b560fe9bb..93b1e0475c9 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -87,34 +87,35 @@ static struct drm_proc_list {
* "/proc/dri/%minor%/", and each entry in proc_list as
* "/proc/dri/%minor%/%name%".
*/
-int drm_proc_init(struct drm_device * dev, int minor,
- struct proc_dir_entry *root, struct proc_dir_entry **dev_root)
+int drm_proc_init(struct drm_minor *minor, int minor_id,
+ struct proc_dir_entry *root)
{
struct proc_dir_entry *ent;
int i, j;
char name[64];
- sprintf(name, "%d", minor);
- *dev_root = proc_mkdir(name, root);
- if (!*dev_root) {
+ sprintf(name, "%d", minor_id);
+ minor->dev_root = proc_mkdir(name, root);
+ if (!minor->dev_root) {
DRM_ERROR("Cannot create /proc/dri/%s\n", name);
return -1;
}
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
ent = create_proc_entry(drm_proc_list[i].name,
- S_IFREG | S_IRUGO, *dev_root);
+ S_IFREG | S_IRUGO, minor->dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
name, drm_proc_list[i].name);
for (j = 0; j < i; j++)
remove_proc_entry(drm_proc_list[i].name,
- *dev_root);
+ minor->dev_root);
remove_proc_entry(name, root);
+ minor->dev_root = NULL;
return -1;
}
ent->read_proc = drm_proc_list[i].f;
- ent->data = dev;
+ ent->data = minor;
}
return 0;
@@ -130,18 +131,17 @@ int drm_proc_init(struct drm_device * dev, int minor,
*
* Remove all proc entries created by proc_init().
*/
-int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
- struct proc_dir_entry *dev_root)
+int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
{
int i;
char name[64];
- if (!root || !dev_root)
+ if (!root || !minor->dev_root)
return 0;
for (i = 0; i < DRM_PROC_ENTRIES; i++)
- remove_proc_entry(drm_proc_list[i].name, dev_root);
- sprintf(name, "%d", minor);
+ remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
+ sprintf(name, "%d", minor->index);
remove_proc_entry(name, root);
return 0;
@@ -163,7 +163,8 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
static int drm_name_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
@@ -205,7 +206,8 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
static int drm__vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int len = 0;
struct drm_map *map;
struct drm_map_list *r_list;
@@ -261,7 +263,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
static int drm_vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -284,7 +287,8 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request,
static int drm__queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int len = 0;
int i;
struct drm_queue *q;
@@ -334,7 +338,8 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
static int drm_queues_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -357,7 +362,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request,
static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int len = 0;
struct drm_device_dma *dma = dev->dma;
int i;
@@ -406,7 +412,8 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -429,7 +436,8 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
static int drm__clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int len = 0;
struct drm_file *priv;
@@ -445,7 +453,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
list_for_each_entry(priv, &dev->filelist, lhead) {
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
- priv->minor,
+ priv->minor->index,
priv->pid,
priv->uid, priv->magic, priv->ioctl_count);
}
@@ -462,7 +470,8 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
static int drm_clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -476,7 +485,8 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
static int drm__vma_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int len = 0;
struct drm_vma_entry *pt;
struct vm_area_struct *vma;
@@ -535,7 +545,8 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
static int drm_vma_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- struct drm_device *dev = (struct drm_device *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index d93a217f856..c2f584f3b46 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -36,23 +36,49 @@
#include "drmP.h"
#include "drm_core.h"
-unsigned int drm_cards_limit = 16; /* Enough for one machine */
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
MODULE_PARM_DESC(debug, "Enable debug output");
-module_param_named(cards_limit, drm_cards_limit, int, 0444);
module_param_named(debug, drm_debug, int, 0600);
-struct drm_head **drm_heads;
+struct idr drm_minors_idr;
+
struct class *drm_class;
struct proc_dir_entry *drm_proc_root;
+static int drm_minor_get_id(struct drm_device *dev, int type)
+{
+ int new_id;
+ int ret;
+ int base = 0, limit = 63;
+
+again:
+ if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("Out of memory expanding minor idr\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&dev->struct_mutex);
+ ret = idr_get_new_above(&drm_minors_idr, NULL,
+ base, &new_id);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret == -EAGAIN) {
+ goto again;
+ } else if (ret) {
+ return ret;
+ }
+
+ if (new_id >= limit) {
+ idr_remove(&drm_minors_idr, new_id);
+ return -EINVAL;
+ }
+ return new_id;
+}
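drm_minor_get_id() only reserves the id (the slot holds NULL); drm_get_minor() publishes the pointer with idr_replace() once the minor is fully initialized, so a concurrent drm_open() doing idr_find() either sees nothing or sees a complete minor. The two-phase pattern in sketch form:

	minor_id = drm_minor_get_id(dev, type);		/* slot holds NULL */
	if (minor_id < 0)
		return minor_id;

	new_minor = kzalloc(sizeof(*new_minor), GFP_KERNEL);
	if (!new_minor) {
		idr_remove(&drm_minors_idr, minor_id);
		return -ENOMEM;
	}
	/* ... fill in new_minor ... */
	idr_replace(&drm_minors_idr, new_minor, minor_id);	/* publish */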
+
static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
@@ -145,48 +171,60 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
* create the proc init entry via proc_init(). This routines assigns
* minor numbers to secondary heads of multi-headed cards
*/
-static int drm_get_head(struct drm_device * dev, struct drm_head * head)
+static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
{
- struct drm_head **heads = drm_heads;
+ struct drm_minor *new_minor;
int ret;
- int minor;
+ int minor_id;
DRM_DEBUG("\n");
- for (minor = 0; minor < drm_cards_limit; minor++, heads++) {
- if (!*heads) {
-
- *head = (struct drm_head) {
- .dev = dev,.device =
- MKDEV(DRM_MAJOR, minor),.minor = minor,};
-
- if ((ret =
- drm_proc_init(dev, minor, drm_proc_root,
- &head->dev_root))) {
- printk(KERN_ERR
- "DRM: Failed to initialize /proc/dri.\n");
- goto err_g1;
- }
-
- ret = drm_sysfs_device_add(dev, head);
- if (ret) {
- printk(KERN_ERR
- "DRM: Error sysfs_device_add.\n");
- goto err_g2;
- }
- *heads = head;
-
- DRM_DEBUG("new minor assigned %d\n", minor);
- return 0;
+ minor_id = drm_minor_get_id(dev, type);
+ if (minor_id < 0)
+ return minor_id;
+
+ new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
+ if (!new_minor) {
+ ret = -ENOMEM;
+ goto err_idr;
+ }
+
+ new_minor->type = type;
+ new_minor->device = MKDEV(DRM_MAJOR, minor_id);
+ new_minor->dev = dev;
+ new_minor->index = minor_id;
+
+ idr_replace(&drm_minors_idr, new_minor, minor_id);
+
+ if (type == DRM_MINOR_LEGACY) {
+ ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
+ if (ret) {
+ DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
+ goto err_mem;
}
+ } else
+ new_minor->dev_root = NULL;
+
+ ret = drm_sysfs_device_add(new_minor);
+ if (ret) {
+ printk(KERN_ERR
+ "DRM: Error sysfs_device_add.\n");
+ goto err_g2;
}
- DRM_ERROR("out of minors\n");
- return -ENOMEM;
- err_g2:
- drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
- err_g1:
- *head = (struct drm_head) {
- .dev = NULL};
+ *minor = new_minor;
+
+ DRM_DEBUG("new minor assigned %d\n", minor_id);
+ return 0;
+
+err_g2:
+ if (new_minor->type == DRM_MINOR_LEGACY)
+ drm_proc_cleanup(new_minor, drm_proc_root);
+err_mem:
+ kfree(new_minor);
+err_idr:
+ idr_remove(&drm_minors_idr, minor_id);
+ *minor = NULL;
return ret;
}
@@ -222,12 +260,12 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
- if ((ret = drm_get_head(dev, &dev->primary)))
+ if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
goto err_g2;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, dev->primary.minor);
+ driver->date, dev->primary->index);
return 0;
@@ -276,18 +314,18 @@ int drm_put_dev(struct drm_device * dev)
* last minor released.
*
*/
-int drm_put_head(struct drm_head * head)
+int drm_put_minor(struct drm_minor **minor_p)
{
- int minor = head->minor;
-
- DRM_DEBUG("release secondary minor %d\n", minor);
-
- drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
- drm_sysfs_device_remove(head->dev);
+ struct drm_minor *minor = *minor_p;
+ DRM_DEBUG("release secondary minor %d\n", minor->index);
- *head = (struct drm_head) {.dev = NULL};
+ if (minor->type == DRM_MINOR_LEGACY)
+ drm_proc_cleanup(minor, drm_proc_root);
+ drm_sysfs_device_remove(minor);
- drm_heads[minor] = NULL;
+ idr_remove(&drm_minors_idr, minor->index);
+ kfree(minor);
+ *minor_p = NULL;
return 0;
}
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index 05ed5043254..7a1d9a782dd 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -19,7 +19,7 @@
#include "drm_core.h"
#include "drmP.h"
-#define to_drm_device(d) container_of(d, struct drm_device, dev)
+#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
/**
* drm_sysfs_suspend - DRM class suspend hook
@@ -31,7 +31,8 @@
*/
static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
{
- struct drm_device *drm_dev = to_drm_device(dev);
+ struct drm_minor *drm_minor = to_drm_minor(dev);
+ struct drm_device *drm_dev = drm_minor->dev;
printk(KERN_ERR "%s\n", __FUNCTION__);
@@ -50,7 +51,8 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
*/
static int drm_sysfs_resume(struct device *dev)
{
- struct drm_device *drm_dev = to_drm_device(dev);
+ struct drm_minor *drm_minor = to_drm_minor(dev);
+ struct drm_device *drm_dev = drm_minor->dev;
if (drm_dev->driver->resume)
return drm_dev->driver->resume(drm_dev);
@@ -120,10 +122,11 @@ void drm_sysfs_destroy(void)
static ssize_t show_dri(struct device *device, struct device_attribute *attr,
char *buf)
{
- struct drm_device *dev = to_drm_device(device);
- if (dev->driver->dri_library_name)
- return dev->driver->dri_library_name(dev, buf);
- return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
+ struct drm_minor *drm_minor = to_drm_minor(device);
+ struct drm_device *drm_dev = drm_minor->dev;
+ if (drm_dev->driver->dri_library_name)
+ return drm_dev->driver->dri_library_name(drm_dev, buf);
+ return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name);
}
static struct device_attribute device_attrs[] = {
@@ -152,25 +155,28 @@ static void drm_sysfs_device_release(struct device *dev)
* as the parent for the Linux device, and make sure it has a file containing
* the driver we're using (for userspace compatibility).
*/
-int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)
+int drm_sysfs_device_add(struct drm_minor *minor)
{
int err;
int i, j;
+ char *minor_str;
- dev->dev.parent = &dev->pdev->dev;
- dev->dev.class = drm_class;
- dev->dev.release = drm_sysfs_device_release;
- dev->dev.devt = head->device;
- snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor);
+ minor->kdev.parent = &minor->dev->pdev->dev;
+ minor->kdev.class = drm_class;
+ minor->kdev.release = drm_sysfs_device_release;
+ minor->kdev.devt = minor->device;
+ minor_str = "card%d";
- err = device_register(&dev->dev);
+ snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
+
+ err = device_register(&minor->kdev);
if (err) {
DRM_ERROR("device add failed: %d\n", err);
goto err_out;
}
for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
- err = device_create_file(&dev->dev, &device_attrs[i]);
+ err = device_create_file(&minor->kdev, &device_attrs[i]);
if (err)
goto err_out_files;
}
@@ -180,8 +186,8 @@ int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)
err_out_files:
if (i > 0)
for (j = 0; j < i; j++)
- device_remove_file(&dev->dev, &device_attrs[i]);
- device_unregister(&dev->dev);
+ device_remove_file(&minor->kdev, &device_attrs[i]);
+ device_unregister(&minor->kdev);
err_out:
return err;
@@ -194,11 +200,11 @@ err_out:
* This call unregisters and cleans up a class device that was created with a
* call to drm_sysfs_device_add()
*/
-void drm_sysfs_device_remove(struct drm_device *dev)
+void drm_sysfs_device_remove(struct drm_minor *minor)
{
int i;
for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
- device_remove_file(&dev->dev, &device_attrs[i]);
- device_unregister(&dev->dev);
+ device_remove_file(&minor->kdev, &device_attrs[i]);
+ device_unregister(&minor->kdev);
}
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 945df72a51a..c234c6f24a8 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -90,7 +90,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->head->dev;
+ struct drm_device *dev = priv->minor->dev;
struct drm_map *map = NULL;
struct drm_map_list *r_list;
struct drm_hash_item *hash;
@@ -207,7 +207,7 @@ static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->head->dev;
+ struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *pt, *temp;
struct drm_map *map;
struct drm_map_list *r_list;
@@ -286,7 +286,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->head->dev;
+ struct drm_device *dev = priv->minor->dev;
struct drm_device_dma *dma = dev->dma;
unsigned long offset;
unsigned long page_nr;
@@ -321,7 +321,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_map *map = (struct drm_map *) vma->vm_private_data;
struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->head->dev;
+ struct drm_device *dev = priv->minor->dev;
struct drm_sg_mem *entry = dev->sg;
unsigned long offset;
unsigned long map_offset;
@@ -402,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->head->dev;
+ struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -420,7 +420,7 @@ static void drm_vm_open_locked(struct vm_area_struct *vma)
static void drm_vm_open(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->head->dev;
+ struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
drm_vm_open_locked(vma);
@@ -438,7 +438,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
static void drm_vm_close(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->head->dev;
+ struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -473,7 +473,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
struct drm_device_dma *dma;
unsigned long length = vma->vm_end - vma->vm_start;
- dev = priv->head->dev;
+ dev = priv->minor->dev;
dma = dev->dma;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff);
@@ -543,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->head->dev;
+ struct drm_device *dev = priv->minor->dev;
struct drm_map *map = NULL;
unsigned long offset = 0;
struct drm_hash_item *hash;
@@ -640,12 +640,12 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
/* Don't let this area swap. Change when
DRM_KERNEL advisory is supported. */
vma->vm_flags |= VM_RESERVED;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
break;
case _DRM_SCATTER_GATHER:
vma->vm_ops = &drm_vm_sg_ops;
vma->vm_private_data = (void *)map;
vma->vm_flags |= VM_RESERVED;
+ vma->vm_page_prot = drm_dma_prot(map->type, vma);
break;
default:
return -EINVAL; /* This should never happen. */
@@ -661,7 +661,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->head->dev;
+ struct drm_device *dev = priv->minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index 8d7ea81c4b6..e5de8ea4154 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -94,7 +94,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
drm_i810_buf_priv_t *buf_priv;
lock_kernel();
- dev = priv->head->dev;
+ dev = priv->minor->dev;
dev_priv = dev->dev_private;
buf = dev_priv->mmap_buffer;
buf_priv = buf->dev_private;
@@ -122,7 +122,7 @@ static const struct file_operations i810_buffer_fops = {
static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
{
- struct drm_device *dev = file_priv->head->dev;
+ struct drm_device *dev = file_priv->minor->dev;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_private_t *dev_priv = dev->dev_private;
const struct file_operations *old_fops;
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 9df08105f4f..60c9376be48 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -96,7 +96,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
drm_i830_buf_priv_t *buf_priv;
lock_kernel();
- dev = priv->head->dev;
+ dev = priv->minor->dev;
dev_priv = dev->dev_private;
buf = dev_priv->mmap_buffer;
buf_priv = buf->dev_private;
@@ -124,7 +124,7 @@ static const struct file_operations i830_buffer_fops = {
static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
{
- struct drm_device *dev = file_priv->head->dev;
+ struct drm_device *dev = file_priv->minor->dev;
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
drm_i830_private_t *dev_priv = dev->dev_private;
const struct file_operations *old_fops;
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index a043bb12301..ef7bf143a80 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -415,10 +415,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
- dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
+ if (++dev_priv->counter > BREADCRUMB_MASK) {
+ dev_priv->counter = 1;
+ DRM_DEBUG("Breadcrumb counter wrapped around\n");
+ }
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
BEGIN_LP_RING(4);
OUT_RING(CMD_STORE_DWORD_IDX);
@@ -428,6 +431,26 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
ADVANCE_LP_RING();
}
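The wrap above restarts the counter at 1 rather than 0, presumably so that a stored breadcrumb of 0 keeps meaning "nothing emitted yet". A minimal sketch of the same wrap rule, assuming BREADCRUMB_MASK is simply the counter's maximum value:

    #include <stdint.h>

    /* Advance a breadcrumb counter, wrapping back to 1 so that the
     * value 0 stays reserved (assumption: 0 means "no breadcrumb").
     */
    static uint32_t breadcrumb_next(uint32_t counter, uint32_t mask)
    {
            if (++counter > mask)
                    counter = 1;
            return counter;
    }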
+int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t flush_cmd = CMD_MI_FLUSH;
+ RING_LOCALS;
+
+ flush_cmd |= flush;
+
+ i915_kernel_lost_context(dev);
+
+ BEGIN_LP_RING(4);
+ OUT_RING(flush_cmd);
+ OUT_RING(0);
+ OUT_RING(0);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+
+ return 0;
+}
+
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
drm_i915_cmdbuffer_t * cmd)
{
@@ -511,52 +534,74 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
return 0;
}
-static int i915_dispatch_flip(struct drm_device * dev)
+static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 num_pages, current_page, next_page, dspbase;
+ int shift = 2 * plane, x, y;
RING_LOCALS;
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __FUNCTION__,
- dev_priv->current_page,
- dev_priv->sarea_priv->pf_current_page);
+ /* Calculate display base offset */
+ num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
+ current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
+ next_page = (current_page + 1) % num_pages;
- i915_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ switch (next_page) {
+ default:
+ case 0:
+ dspbase = dev_priv->sarea_priv->front_offset;
+ break;
+ case 1:
+ dspbase = dev_priv->sarea_priv->back_offset;
+ break;
+ case 2:
+ dspbase = dev_priv->sarea_priv->third_offset;
+ break;
+ }
- BEGIN_LP_RING(6);
- OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
- OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
+ if (plane == 0) {
+ x = dev_priv->sarea_priv->planeA_x;
+ y = dev_priv->sarea_priv->planeA_y;
} else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
+ x = dev_priv->sarea_priv->planeB_x;
+ y = dev_priv->sarea_priv->planeB_y;
}
- OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+ DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
+ dspbase);
BEGIN_LP_RING(4);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(20);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
+ OUT_RING(sync ? 0 :
+ (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
+ MI_WAIT_FOR_PLANE_A_FLIP)));
+ OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
+ (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
+ OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
+ OUT_RING(dspbase);
ADVANCE_LP_RING();
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
- return 0;
+ dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
+ dev_priv->sarea_priv->pf_current_page |= next_page << shift;
+}
+
+void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
+ planes, dev_priv->sarea_priv->pf_current_page);
+
+ i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
+
+ for (i = 0; i < 2; i++)
+ if (planes & (1 << i))
+ i915_do_dispatch_flip(dev, i, sync);
+
+ i915_emit_breadcrumb(dev);
+
}
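Both helpers above track flip state through sarea_priv->pf_current_page, which packs the current page of each display plane into a two-bit field at bit position 2 * plane. A standalone sketch of that encoding, mirroring the shift arithmetic used above:

    #include <stdint.h>

    /* Read plane N's current page out of the packed field. */
    static unsigned int pf_page_get(uint32_t pf_current_page, int plane)
    {
            return (pf_current_page >> (2 * plane)) & 0x3;
    }

    /* Store a new page for plane N, leaving the other plane's bits intact. */
    static uint32_t pf_page_set(uint32_t pf_current_page, int plane,
                                unsigned int page)
    {
            int shift = 2 * plane;

            pf_current_page &= ~(0x3u << shift);
            return pf_current_page | ((uint32_t)page << shift);
    }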
static int i915_quiescent(struct drm_device * dev)
@@ -579,7 +624,6 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
dev_priv->sarea_priv;
drm_i915_batchbuffer_t *batch = data;
@@ -602,7 +646,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
ret = i915_dispatch_batchbuffer(dev, batch);
- sarea_priv->last_dispatch = (int)hw_status[5];
+ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return ret;
}
@@ -610,7 +654,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
dev_priv->sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
@@ -635,18 +678,51 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
return ret;
}
- sarea_priv->last_dispatch = (int)hw_status[5];
+ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ return 0;
+}
+
+static int i915_do_cleanup_pageflip(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
+
+ DRM_DEBUG("\n");
+
+ for (i = 0, planes = 0; i < 2; i++)
+ if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
+ dev_priv->sarea_priv->pf_current_page =
+ (dev_priv->sarea_priv->pf_current_page &
+ ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));
+
+ planes |= 1 << i;
+ }
+
+ if (planes)
+ i915_dispatch_flip(dev, planes, 0);
+
return 0;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- DRM_DEBUG("%s\n", __FUNCTION__);
+ drm_i915_flip_t *param = data;
+
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
- return i915_dispatch_flip(dev);
+ /* This is really planes */
+ if (param->pipes & ~0x3) {
+ DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
+ param->pipes);
+ return -EINVAL;
+ }
+
+ i915_dispatch_flip(dev, param->pipes, 0);
+
+ return 0;
}
static int i915_getparam(struct drm_device *dev, void *data,
@@ -807,6 +883,8 @@ void i915_driver_lastclose(struct drm_device * dev)
if (!dev_priv)
return;
+ if (drm_getsarea(dev) && dev_priv->sarea_priv)
+ i915_do_cleanup_pageflip(dev);
if (dev_priv->agp_heap)
i915_mem_takedown(&(dev_priv->agp_heap));
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 05c66cf03a9..0431c00e228 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -105,14 +105,29 @@ typedef struct _drm_i915_sarea {
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
- int pipeA_x;
- int pipeA_y;
- int pipeA_w;
- int pipeA_h;
- int pipeB_x;
- int pipeB_y;
- int pipeB_w;
- int pipeB_h;
+ int planeA_x;
+ int planeA_y;
+ int planeA_w;
+ int planeA_h;
+ int planeB_x;
+ int planeB_y;
+ int planeB_w;
+ int planeB_h;
+
+ /* Triple buffering */
+ drm_handle_t third_handle;
+ int third_offset;
+ int third_size;
+ unsigned int third_tiled;
+
+ /* buffer object handles for the static buffers. May change
+ * over the lifetime of the client, though it doesn't in our current
+ * implementation.
+ */
+ unsigned int front_bo_handle;
+ unsigned int back_bo_handle;
+ unsigned int third_bo_handle;
+ unsigned int depth_bo_handle;
} drm_i915_sarea_t;
/* Flags for perf_boxes
@@ -146,7 +161,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
-#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
+#define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t)
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
@@ -161,6 +176,18 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+/* Asynchronous page flipping:
+ */
+typedef struct drm_i915_flip {
+ /*
+ * This is really talking about planes, and we could rename it
+ * except for the fact that some of the duplicated i915_drm.h files
+ * out there check for HAVE_I915_FLIP and so might pick up this
+ * version.
+ */
+ int pipes;
+} drm_i915_flip_t;
+
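For orientation, a hedged sketch of how userspace might invoke the reworked flip ioctl. The wrapper below is illustrative only: real callers are also expected to hold the heavyweight DRM lock (the kernel side does LOCK_TEST_WITH_RETURN), and only plane masks 0x1 through 0x3 pass validation:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include "i915_drm.h"   /* drm_i915_flip_t, DRM_IOCTL_I915_FLIP */

    /* Request an asynchronous flip on the planes in plane_mask (bit 0 =
     * plane A, bit 1 = plane B). drm_fd is an already-open DRM device fd.
     */
    static int request_flip(int drm_fd, int plane_mask)
    {
            drm_i915_flip_t flip = { .pipes = plane_mask }; /* "pipes" really means planes */

            if (ioctl(drm_fd, DRM_IOCTL_I915_FLIP, &flip) < 0) {
                    perror("DRM_IOCTL_I915_FLIP");
                    return -1;
            }
            return 0;
    }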
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
*/
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index b2b451dc446..bb8f1b2fb38 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -533,8 +533,7 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
- DRIVER_IRQ_VBL2,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.load = i915_driver_load,
.unload = i915_driver_unload,
.lastclose = i915_driver_lastclose,
@@ -542,8 +541,9 @@ static struct drm_driver driver = {
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
- .vblank_wait = i915_driver_vblank_wait,
- .vblank_wait2 = i915_driver_vblank_wait2,
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i915_enable_vblank,
+ .disable_vblank = i915_disable_vblank,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 675d88bda06..c614d78b3df 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -76,8 +76,9 @@ struct mem_block {
typedef struct _drm_i915_vbl_swap {
struct list_head head;
drm_drawable_t drw_id;
- unsigned int pipe;
+ unsigned int plane;
unsigned int sequence;
+ int flip;
} drm_i915_vbl_swap_t;
typedef struct drm_i915_private {
@@ -90,7 +91,7 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
void *hw_status_page;
dma_addr_t dma_status_page;
- unsigned long counter;
+ uint32_t counter;
unsigned int status_gfx_addr;
drm_local_map_t hws_map;
@@ -103,13 +104,18 @@ typedef struct drm_i915_private {
wait_queue_head_t irq_queue;
atomic_t irq_received;
- atomic_t irq_emitted;
+ atomic_t irq_emited;
int tex_lru_log_granularity;
int allow_batchbuffer;
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
+ spinlock_t user_irq_lock;
+ int user_irq_refcount;
+ int fence_irq_on;
+ uint32_t irq_enable_reg;
+ int irq_enabled;
spinlock_t swaps_lock;
drm_i915_vbl_swap_t vbl_swaps;
@@ -216,7 +222,7 @@ extern void i915_driver_preclose(struct drm_device *dev,
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-
+extern void i915_dispatch_flip(struct drm_device * dev, int planes, int sync);
/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -227,7 +233,7 @@ extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequenc
extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern void i915_driver_irq_postinstall(struct drm_device * dev);
+extern int i915_driver_irq_postinstall(struct drm_device * dev);
extern void i915_driver_irq_uninstall(struct drm_device * dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -235,6 +241,9 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int i915_enable_vblank(struct drm_device *dev, int crtc);
+extern void i915_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -379,21 +388,91 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/* Interrupt bits:
*/
-#define USER_INT_FLAG (1<<1)
-#define VSYNC_PIPEB_FLAG (1<<5)
-#define VSYNC_PIPEA_FLAG (1<<7)
-#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
+#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
+#define I915_HWB_OOM_INTERRUPT (1<<13) /* binner out of memory */
+#define I915_SYNC_STATUS_INTERRUPT (1<<12)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
+#define I915_DEBUG_INTERRUPT (1<<2)
+#define I915_USER_INTERRUPT (1<<1)
+
#define I915REG_HWSTAM 0x02098
#define I915REG_INT_IDENTITY_R 0x020a4
#define I915REG_INT_MASK_R 0x020a8
#define I915REG_INT_ENABLE_R 0x020a0
+#define I915REG_INSTPM 0x020c0
+
+#define PIPEADSL 0x70000
+#define PIPEBDSL 0x71000
#define I915REG_PIPEASTAT 0x70024
#define I915REG_PIPEBSTAT 0x71024
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ * do {
+ * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ * PIPE_FRAME_HIGH_SHIFT);
+ * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ * PIPE_FRAME_LOW_SHIFT);
+ * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ * PIPE_FRAME_HIGH_SHIFT);
+ * } while (high1 != high2);
+ * frame = (high1 << 8) | low1;
+ */
+#define PIPEAFRAMEHIGH 0x70040
+#define PIPEBFRAMEHIGH 0x71040
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+#define PIPEAFRAMEPIXEL 0x70044
+#define PIPEBFRAMEPIXEL 0x71044
-#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define I915_VBLANK_CLEAR (1UL<<1)
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+/*
+ * Pixel within the current frame is counted in the PIPEAFRAMEPIXEL register
+ * and is 24 bits wide.
+ */
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
+
+#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define I915_CRC_ERROR_ENABLE (1UL<<29)
+#define I915_CRC_DONE_ENABLE (1UL<<28)
+#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
+#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
+#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
+#define I915_DPST_EVENT_ENABLE (1UL<<23)
+#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
+#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
+#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
+#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
+#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
+#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
+#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
+#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
+#define I915_DPST_EVENT_STATUS (1UL<<7)
+#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
+#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
+#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
+#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
+#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
+#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
@@ -566,6 +645,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
+#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_BUFFER_START (0x31<<23)
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 92653b38e64..023ce66ef3a 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -38,6 +38,109 @@
#define MAX_NOPID ((u32)~0)
/**
+ * i915_get_pipe - return the pipe associated with a given plane
+ * @dev: DRM device
+ * @plane: plane to look for
+ *
+ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
+ * rather than a pipe number, since they may not always be equal. This routine
+ * maps the given @plane back to a pipe number.
+ */
+static int
+i915_get_pipe(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 dspcntr;
+
+ dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
+
+ return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
+}
+
+/**
+ * i915_get_plane - return the plane associated with a given pipe
+ * @dev: DRM device
+ * @pipe: pipe to look for
+ *
+ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
+ * rather than a pipe number, since they may not always be equal. This routine
+ * maps the given @pipe back to a plane number.
+ */
+static int
+i915_get_plane(struct drm_device *dev, int pipe)
+{
+ if (i915_get_pipe(dev, 0) == pipe)
+ return 0;
+ return 1;
+}
+
+/**
+ * i915_pipe_enabled - check if a pipe is enabled
+ * @dev: DRM device
+ * @pipe: pipe to check
+ *
+ * Reading certain registers when the pipe is disabled can hang the chip.
+ * Use this routine to make sure the PLL is running and the pipe is active
+ * before reading such registers if unsure.
+ */
+static int
+i915_pipe_enabled(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
+
+ if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
+ return 1;
+
+ return 0;
+}
+
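i915_get_vblank_counter below uses this check as a guard before touching the frame counter registers. Condensed, the pattern looks like this sketch, with the PIPE(A|B)DSL scanline read standing in for any pipe-scoped register access:

    /* Hedged usage sketch: refuse to read a pipe-scoped register when the
     * pipe is down, since such reads can hang the chip.
     */
    static u32 read_pipe_scanline(struct drm_device *dev, int pipe)
    {
            drm_i915_private_t *dev_priv = dev->dev_private;

            if (!i915_pipe_enabled(dev, pipe))
                    return 0;

            return I915_READ(pipe ? PIPEBDSL : PIPEADSL);
    }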
+/**
+ * Emit a synchronous flip.
+ *
+ * This function must be called with the drawable spinlock held.
+ */
+static void
+i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
+ int plane)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ u16 x1, y1, x2, y2;
+ int pf_planes = 1 << plane;
+
+ /* If the window is visible on the other plane, we have to flip on that
+ * plane as well.
+ */
+ if (plane == 1) {
+ x1 = sarea_priv->planeA_x;
+ y1 = sarea_priv->planeA_y;
+ x2 = x1 + sarea_priv->planeA_w;
+ y2 = y1 + sarea_priv->planeA_h;
+ } else {
+ x1 = sarea_priv->planeB_x;
+ y1 = sarea_priv->planeB_y;
+ x2 = x1 + sarea_priv->planeB_w;
+ y2 = y1 + sarea_priv->planeB_h;
+ }
+
+ if (x2 > 0 && y2 > 0) {
+ int i, num_rects = drw->num_rects;
+ struct drm_clip_rect *rect = drw->rects;
+
+ for (i = 0; i < num_rects; i++)
+ if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
+ rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
+ pf_planes = 0x3;
+
+ break;
+ }
+ }
+
+ i915_dispatch_flip(dev, pf_planes, 1);
+}
+
+/**
* Emit blits for scheduled buffer swaps.
*
* This function will be called with the HW lock held.
@@ -45,40 +148,59 @@
static void i915_vblank_tasklet(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
struct list_head *list, *tmp, hits, *hit;
- int nhits, nrects, slice[2], upper[2], lower[2], i;
- unsigned counter[2] = { atomic_read(&dev->vbl_received),
- atomic_read(&dev->vbl_received2) };
+ int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
+ unsigned counter[2];
struct drm_drawable_info *drw;
drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
- u32 cpp = dev_priv->cpp;
+ u32 cpp = dev_priv->cpp, offsets[3];
u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
XY_SRC_COPY_BLT_WRITE_ALPHA |
XY_SRC_COPY_BLT_WRITE_RGB)
: XY_SRC_COPY_BLT_CMD;
- u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
- (cpp << 23) | (1 << 24);
+ u32 src_pitch = sarea_priv->pitch * cpp;
+ u32 dst_pitch = sarea_priv->pitch * cpp;
+ /* COPY rop (0xcc), map cpp to magic color depth constants */
+ u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
RING_LOCALS;
+ if (sarea_priv->front_tiled) {
+ cmd |= XY_SRC_COPY_BLT_DST_TILED;
+ dst_pitch >>= 2;
+ }
+ if (sarea_priv->back_tiled) {
+ cmd |= XY_SRC_COPY_BLT_SRC_TILED;
+ src_pitch >>= 2;
+ }
+
+ counter[0] = drm_vblank_count(dev, 0);
+ counter[1] = drm_vblank_count(dev, 1);
+
DRM_DEBUG("\n");
INIT_LIST_HEAD(&hits);
nhits = nrects = 0;
- spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+ /* No irqsave/restore necessary. This tasklet may be run in an
+ * interrupt context or normal context, but we don't have to worry
+ * about being interrupted by another acquirer of this lock, because
+ * we are the only interrupt-context path that takes it.
+ */
+ spin_lock(&dev_priv->swaps_lock);
/* Find buffer swaps scheduled for this vertical blank */
list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
drm_i915_vbl_swap_t *vbl_swap =
list_entry(list, drm_i915_vbl_swap_t, head);
+ int pipe = i915_get_pipe(dev, vbl_swap->plane);
- if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
+ if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
continue;
list_del(list);
dev_priv->swaps_pending--;
+ drm_vblank_put(dev, pipe);
spin_unlock(&dev_priv->swaps_lock);
spin_lock(&dev->drw_lock);
@@ -116,33 +238,23 @@ static void i915_vblank_tasklet(struct drm_device *dev)
spin_lock(&dev_priv->swaps_lock);
}
- if (nhits == 0) {
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
- return;
- }
-
spin_unlock(&dev_priv->swaps_lock);
- i915_kernel_lost_context(dev);
-
- BEGIN_LP_RING(6);
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(0);
- OUT_RING(0);
- OUT_RING(sarea_priv->width | sarea_priv->height << 16);
- OUT_RING(sarea_priv->width | sarea_priv->height << 16);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
+ if (nhits == 0)
+ return;
- sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
+ i915_kernel_lost_context(dev);
upper[0] = upper[1] = 0;
- slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
- slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
- lower[0] = sarea_priv->pipeA_y + slice[0];
- lower[1] = sarea_priv->pipeB_y + slice[0];
+ slice[0] = max(sarea_priv->planeA_h / nhits, 1);
+ slice[1] = max(sarea_priv->planeB_h / nhits, 1);
+ lower[0] = sarea_priv->planeA_y + slice[0];
+ lower[1] = sarea_priv->planeB_y + slice[0];
+
+ offsets[0] = sarea_priv->front_offset;
+ offsets[1] = sarea_priv->back_offset;
+ offsets[2] = sarea_priv->third_offset;
+ num_pages = sarea_priv->third_handle ? 3 : 2;
spin_lock(&dev->drw_lock);
@@ -154,6 +266,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
for (i = 0; i++ < nhits;
upper[0] = lower[0], lower[0] += slice[0],
upper[1] = lower[1], lower[1] += slice[1]) {
+ int init_drawrect = 1;
+
if (i == nhits)
lower[0] = lower[1] = sarea_priv->height;
@@ -161,7 +275,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
drm_i915_vbl_swap_t *swap_hit =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_clip_rect *rect;
- int num_rects, pipe;
+ int num_rects, plane, front, back;
unsigned short top, bottom;
drw = drm_get_drawable_info(dev, swap_hit->drw_id);
@@ -169,10 +283,50 @@ static void i915_vblank_tasklet(struct drm_device *dev)
if (!drw)
continue;
+ plane = swap_hit->plane;
+
+ if (swap_hit->flip) {
+ i915_dispatch_vsync_flip(dev, drw, plane);
+ continue;
+ }
+
+ if (init_drawrect) {
+ int width = sarea_priv->width;
+ int height = sarea_priv->height;
+ if (IS_I965G(dev)) {
+ BEGIN_LP_RING(4);
+
+ OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+ OUT_RING(0);
+ OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
+ OUT_RING(0);
+
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(6);
+
+ OUT_RING(GFX_OP_DRAWRECT_INFO);
+ OUT_RING(0);
+ OUT_RING(0);
+ OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
+ OUT_RING(0);
+ OUT_RING(0);
+
+ ADVANCE_LP_RING();
+ }
+
+ sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
+
+ init_drawrect = 0;
+ }
+
rect = drw->rects;
- pipe = swap_hit->pipe;
- top = upper[pipe];
- bottom = lower[pipe];
+ top = upper[plane];
+ bottom = lower[plane];
+
+ front = (dev_priv->sarea_priv->pf_current_page >>
+ (2 * plane)) & 0x3;
+ back = (front + 1) % num_pages;
for (num_rects = drw->num_rects; num_rects--; rect++) {
int y1 = max(rect->y1, top);
@@ -184,20 +338,20 @@ static void i915_vblank_tasklet(struct drm_device *dev)
BEGIN_LP_RING(8);
OUT_RING(cmd);
- OUT_RING(pitchropcpp);
+ OUT_RING(ropcpp | dst_pitch);
OUT_RING((y1 << 16) | rect->x1);
OUT_RING((y2 << 16) | rect->x2);
- OUT_RING(sarea_priv->front_offset);
+ OUT_RING(offsets[front]);
OUT_RING((y1 << 16) | rect->x1);
- OUT_RING(pitchropcpp & 0xffff);
- OUT_RING(sarea_priv->back_offset);
+ OUT_RING(src_pitch);
+ OUT_RING(offsets[back]);
ADVANCE_LP_RING();
}
}
}
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ spin_unlock(&dev->drw_lock);
list_for_each_safe(hit, tmp, &hits) {
drm_i915_vbl_swap_t *swap_hit =
@@ -209,67 +363,112 @@ static void i915_vblank_tasklet(struct drm_device *dev)
}
}
+u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long high_frame;
+ unsigned long low_frame;
+ u32 high1, high2, low, count;
+ int pipe;
+
+ pipe = i915_get_pipe(dev, plane);
+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ printk(KERN_ERR "trying to get vblank count for disabled "
+ "pipe %d\n", pipe);
+ return 0;
+ }
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ do {
+ high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+ PIPE_FRAME_HIGH_SHIFT);
+ low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+ PIPE_FRAME_LOW_SHIFT);
+ high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+ PIPE_FRAME_HIGH_SHIFT);
+ } while (high1 != high2);
+
+ count = (high1 << 8) | low;
+
+ /* The counter may be reset by another driver (e.g. the 2D driver);
+    when it reads zero we have no way of telling a wrap from a
+    reset, so make a rough guess.
+ */
+ if (count == 0 && dev->last_vblank[pipe] < dev->max_vblank_count/2)
+ dev->last_vblank[pipe] = 0;
+
+ return count;
+}
+
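The do/while above is the general recipe for sampling a counter that is split across two registers which latch independently: re-read the high half until it is stable around the low read. The same pattern in isolation, assuming a hypothetical read_reg() MMIO accessor and the 16-bit high / 8-bit low field widths used here:

    #include <stdint.h>

    extern uint32_t read_reg(unsigned long addr); /* hypothetical MMIO read */

    /* Sample a 24-bit counter whose high 16 bits and low 8 bits live in
     * separate, unsynchronized registers.
     */
    static uint32_t read_split_counter(unsigned long high_addr,
                                       unsigned long low_addr)
    {
            uint32_t high1, high2, low;

            do {
                    high1 = read_reg(high_addr) & 0xffff;
                    low = (read_reg(low_addr) >> 24) & 0xff;
                    high2 = read_reg(high_addr) & 0xffff;
            } while (high1 != high2); /* high half changed mid-read: retry */

            return (high1 << 8) | low;
    }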
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u16 temp;
+ u32 iir;
u32 pipea_stats, pipeb_stats;
-
- pipea_stats = I915_READ(I915REG_PIPEASTAT);
- pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
-
- temp = I915_READ16(I915REG_INT_IDENTITY_R);
-
- temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
-
- DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
-
- if (temp == 0)
+ int vblank = 0;
+
+ iir = I915_READ(I915REG_INT_IDENTITY_R);
+ if (iir == 0) {
+ DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
+ iir,
+ I915_READ(I915REG_INT_MASK_R),
+ I915_READ(I915REG_INT_ENABLE_R),
+ I915_READ(I915REG_PIPEASTAT),
+ I915_READ(I915REG_PIPEBSTAT));
return IRQ_NONE;
+ }
- I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
- (void) I915_READ16(I915REG_INT_IDENTITY_R);
- DRM_READMEMORYBARRIER();
-
- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ /*
+ * Clear the PIPE(A|B)STAT regs before the IIR otherwise
+ * we may get extra interrupts.
+ */
+ if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
+ pipea_stats = I915_READ(I915REG_PIPEASTAT);
+ if (pipea_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
+ I915_VBLANK_INTERRUPT_STATUS))
+ {
+ vblank++;
+ drm_handle_vblank(dev, i915_get_plane(dev, 0));
+ }
+ I915_WRITE(I915REG_PIPEASTAT, pipea_stats);
+ }
+ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
+ pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
+ if (pipeb_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
+ I915_VBLANK_INTERRUPT_STATUS))
+ {
+ vblank++;
+ drm_handle_vblank(dev, i915_get_plane(dev, 1));
+ }
+ I915_WRITE(I915REG_PIPEBSTAT, pipeb_stats);
+ }
- if (temp & USER_INT_FLAG)
- DRM_WAKEUP(&dev_priv->irq_queue);
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
- int vblank_pipe = dev_priv->vblank_pipe;
-
- if ((vblank_pipe &
- (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
- == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
- if (temp & VSYNC_PIPEA_FLAG)
- atomic_inc(&dev->vbl_received);
- if (temp & VSYNC_PIPEB_FLAG)
- atomic_inc(&dev->vbl_received2);
- } else if (((temp & VSYNC_PIPEA_FLAG) &&
- (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
- ((temp & VSYNC_PIPEB_FLAG) &&
- (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
- atomic_inc(&dev->vbl_received);
-
- DRM_WAKEUP(&dev->vbl_queue);
- drm_vbl_send_signals(dev);
+ I915_WRITE(I915REG_INT_IDENTITY_R, iir);
+ (void) I915_READ(I915REG_INT_IDENTITY_R); /* Flush posted write */
+ if (iir & I915_USER_INTERRUPT) {
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ }
+ if (vblank) {
if (dev_priv->swaps_pending > 0)
drm_locked_tasklet(dev, i915_vblank_tasklet);
- I915_WRITE(I915REG_PIPEASTAT,
- pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
- I915_VBLANK_CLEAR);
- I915_WRITE(I915REG_PIPEBSTAT,
- pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
- I915_VBLANK_CLEAR);
}
return IRQ_HANDLED;
}
-static int i915_emit_irq(struct drm_device * dev)
+static int i915_emit_irq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -316,42 +515,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
}
- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return ret;
-}
-
-static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
- atomic_t *counter)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned int cur_vblank;
- int ret = 0;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
- (((cur_vblank = atomic_read(counter))
- - *sequence) <= (1<<23)));
-
- *sequence = cur_vblank;
-
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
return ret;
}
-
-int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
-{
- return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
-}
-
-int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
-{
- return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
-}
-
/* Needs the lock as it touches the ring.
*/
int i915_irq_emit(struct drm_device *dev, void *data,
@@ -394,18 +563,96 @@ int i915_irq_wait(struct drm_device *dev, void *data,
return i915_wait_irq(dev, irqwait->irq_seq);
}
+int i915_enable_vblank(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe = i915_get_pipe(dev, plane);
+ u32 pipestat_reg = 0;
+ u32 pipestat;
+
+ switch (pipe) {
+ case 0:
+ pipestat_reg = I915REG_PIPEASTAT;
+ dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ break;
+ case 1:
+ pipestat_reg = I915REG_PIPEBSTAT;
+ dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ break;
+ default:
+ DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
+ pipe);
+ break;
+ }
+
+ if (pipestat_reg)
+ {
+ pipestat = I915_READ(pipestat_reg);
+ /*
+ * Older chips didn't have the start vblank interrupt,
+ * so fall back to the plain vblank interrupt on pre-965 chips.
+ */
+ if (IS_I965G(dev))
+ pipestat |= I915_START_VBLANK_INTERRUPT_ENABLE;
+ else
+ pipestat |= I915_VBLANK_INTERRUPT_ENABLE;
+ /*
+ * Clear any pending status
+ */
+ pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
+ I915_VBLANK_INTERRUPT_STATUS);
+ I915_WRITE(pipestat_reg, pipestat);
+ }
+ I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+
+ return 0;
+}
+
+void i915_disable_vblank(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe = i915_get_pipe(dev, plane);
+ u32 pipestat_reg = 0;
+ u32 pipestat;
+
+ switch (pipe) {
+ case 0:
+ pipestat_reg = I915REG_PIPEASTAT;
+ dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ break;
+ case 1:
+ pipestat_reg = I915REG_PIPEBSTAT;
+ dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ break;
+ default:
+ DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
+ pipe);
+ break;
+ }
+
+ I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+ if (pipestat_reg)
+ {
+ pipestat = I915_READ(pipestat_reg);
+ pipestat &= ~(I915_START_VBLANK_INTERRUPT_ENABLE |
+ I915_VBLANK_INTERRUPT_ENABLE);
+ /*
+ * Clear any pending status
+ */
+ pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
+ I915_VBLANK_INTERRUPT_STATUS);
+ I915_WRITE(pipestat_reg, pipestat);
+ }
+}
+
static void i915_enable_interrupt (struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u16 flag;
- flag = 0;
- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
- flag |= VSYNC_PIPEA_FLAG;
- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
- flag |= VSYNC_PIPEB_FLAG;
+ dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
- I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
+ I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+ dev_priv->irq_enabled = 1;
}
/* Set the vblank monitor pipe
@@ -428,8 +675,6 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
dev_priv->vblank_pipe = pipe->pipe;
- i915_enable_interrupt (dev);
-
return 0;
}
@@ -447,9 +692,9 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
flag = I915_READ(I915REG_INT_ENABLE_R);
pipe->pipe = 0;
- if (flag & VSYNC_PIPEA_FLAG)
+ if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
- if (flag & VSYNC_PIPEB_FLAG)
+ if (flag & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
return 0;
@@ -464,27 +709,30 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_swap_t *swap = data;
drm_i915_vbl_swap_t *vbl_swap;
- unsigned int pipe, seqtype, curseq;
+ unsigned int pipe, seqtype, curseq, plane;
unsigned long irqflags;
struct list_head *list;
+ int ret;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __func__);
return -EINVAL;
}
- if (dev_priv->sarea_priv->rotation) {
+ if (!dev_priv->sarea_priv || dev_priv->sarea_priv->rotation) {
DRM_DEBUG("Rotation not supported\n");
return -EINVAL;
}
if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
- _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
+ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
+ _DRM_VBLANK_FLIP)) {
DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
return -EINVAL;
}
- pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+ plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+ pipe = i915_get_pipe(dev, plane);
seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
@@ -495,6 +743,11 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
spin_lock_irqsave(&dev->drw_lock, irqflags);
+ /* It makes no sense to schedule a swap for a drawable that doesn't have
+ * valid information at this point. E.g. this could mean that the X
+ * server is too old to push drawable information to the DRM, in which
+ * case all such swaps would become ineffective.
+ */
if (!drm_get_drawable_info(dev, swap->drawable)) {
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
@@ -503,7 +756,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
+ drm_update_vblank_count(dev, pipe);
+ curseq = drm_vblank_count(dev, pipe);
if (seqtype == _DRM_VBLANK_RELATIVE)
swap->sequence += curseq;
@@ -517,14 +771,43 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
}
}
+ if (swap->seqtype & _DRM_VBLANK_FLIP) {
+ swap->sequence--;
+
+ if ((curseq - swap->sequence) <= (1<<23)) {
+ struct drm_drawable_info *drw;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+ drw = drm_get_drawable_info(dev, swap->drawable);
+
+ if (!drw) {
+ spin_unlock_irqrestore(&dev->drw_lock,
+ irqflags);
+ DRM_DEBUG("Invalid drawable ID %d\n",
+ swap->drawable);
+ return -EINVAL;
+ }
+
+ i915_dispatch_vsync_flip(dev, drw, plane);
+
+ spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+ return 0;
+ }
+ }
+
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
list_for_each(list, &dev_priv->vbl_swaps.head) {
vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
if (vbl_swap->drw_id == swap->drawable &&
- vbl_swap->pipe == pipe &&
+ vbl_swap->plane == plane &&
vbl_swap->sequence == swap->sequence) {
+ vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
DRM_DEBUG("Already scheduled\n");
return 0;
@@ -547,9 +830,19 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
DRM_DEBUG("\n");
+ ret = drm_vblank_get(dev, pipe);
+ if (ret) {
+ drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+ return ret;
+ }
+
vbl_swap->drw_id = swap->drawable;
- vbl_swap->pipe = pipe;
+ vbl_swap->plane = plane;
vbl_swap->sequence = swap->sequence;
+ vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
+
+ if (vbl_swap->flip)
+ swap->sequence++;
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
@@ -567,37 +860,57 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- I915_WRITE16(I915REG_HWSTAM, 0xfffe);
+ I915_WRITE16(I915REG_HWSTAM, 0xeffe);
I915_WRITE16(I915REG_INT_MASK_R, 0x0);
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}
-void i915_driver_irq_postinstall(struct drm_device * dev)
+int i915_driver_irq_postinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int ret, num_pipes = 2;
spin_lock_init(&dev_priv->swaps_lock);
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
- if (!dev_priv->vblank_pipe)
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
+ dev_priv->user_irq_refcount = 0;
+ dev_priv->irq_enable_reg = 0;
+
+ ret = drm_vblank_init(dev, num_pipes);
+ if (ret)
+ return ret;
+
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
i915_enable_interrupt(dev);
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+
+ /*
+ * Initialize the hardware status page IRQ location.
+ */
+
+ I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
+ return 0;
}
void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u16 temp;
+ u32 temp;
if (!dev_priv)
return;
- I915_WRITE16(I915REG_HWSTAM, 0xffff);
- I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
- I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
-
- temp = I915_READ16(I915REG_INT_IDENTITY_R);
- I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+ dev_priv->irq_enabled = 0;
+ I915_WRITE(I915REG_HWSTAM, 0xffffffff);
+ I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
+ I915_WRITE(I915REG_INT_ENABLE_R, 0x0);
+
+ temp = I915_READ(I915REG_PIPEASTAT);
+ I915_WRITE(I915REG_PIPEASTAT, temp);
+ temp = I915_READ(I915REG_PIPEBSTAT);
+ I915_WRITE(I915REG_PIPEBSTAT, temp);
+ temp = I915_READ(I915REG_INT_IDENTITY_R);
+ I915_WRITE(I915REG_INT_IDENTITY_R, temp);
}
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index 5572939fc7d..6b3790939e7 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -45,15 +45,16 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
- DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
- DRIVER_IRQ_VBL,
+ DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_mga_buf_priv_t),
.load = mga_driver_load,
.unload = mga_driver_unload,
.lastclose = mga_driver_lastclose,
.dma_quiescent = mga_driver_dma_quiescent,
.device_is_agp = mga_driver_device_is_agp,
- .vblank_wait = mga_driver_vblank_wait,
+ .get_vblank_counter = mga_get_vblank_counter,
+ .enable_vblank = mga_enable_vblank,
+ .disable_vblank = mga_disable_vblank,
.irq_preinstall = mga_driver_irq_preinstall,
.irq_postinstall = mga_driver_irq_postinstall,
.irq_uninstall = mga_driver_irq_uninstall,
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index f6ebd24bd58..8f7291f3636 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -120,6 +120,7 @@ typedef struct drm_mga_private {
u32 clear_cmd;
u32 maccess;
+ atomic_t vbl_received; /**< Number of vblanks received. */
wait_queue_head_t fence_queue;
atomic_t last_fence_retired;
u32 next_fence_to_post;
@@ -181,11 +182,14 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
extern int mga_warp_init(drm_mga_private_t * dev_priv);
/* mga_irq.c */
+extern int mga_enable_vblank(struct drm_device *dev, int crtc);
+extern void mga_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
extern void mga_driver_irq_preinstall(struct drm_device * dev);
-extern void mga_driver_irq_postinstall(struct drm_device * dev);
+extern int mga_driver_irq_postinstall(struct drm_device * dev);
extern void mga_driver_irq_uninstall(struct drm_device * dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
diff --git a/drivers/char/drm/mga_irq.c b/drivers/char/drm/mga_irq.c
index 9302cb8f0f8..06852fb4b27 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/char/drm/mga_irq.c
@@ -35,6 +35,20 @@
#include "mga_drm.h"
#include "mga_drv.h"
+u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ const drm_mga_private_t *const dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+
+ if (crtc != 0) {
+ return 0;
+ }
+
+
+ return atomic_read(&dev_priv->vbl_received);
+}
+
+
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -47,9 +61,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* VBLANK interrupt */
if (status & MGA_VLINEPEN) {
MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
- atomic_inc(&dev->vbl_received);
- DRM_WAKEUP(&dev->vbl_queue);
- drm_vbl_send_signals(dev);
+ atomic_inc(&dev_priv->vbl_received);
+ drm_handle_vblank(dev, 0);
handled = 1;
}
@@ -78,22 +91,34 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
}
-int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int mga_enable_vblank(struct drm_device *dev, int crtc)
{
- unsigned int cur_vblank;
- int ret = 0;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- /* Assume that the user has missed the current sequence number
- * by about a day rather than she wants to wait for years
- * using vertical blanks...
- */
- DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
- (((cur_vblank = atomic_read(&dev->vbl_received))
- - *sequence) <= (1 << 23)));
+ if (crtc != 0) {
+ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+ crtc);
+ return 0;
+ }
- *sequence = cur_vblank;
+ MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
+ return 0;
+}
- return ret;
+
+void mga_disable_vblank(struct drm_device *dev, int crtc)
+{
+ if (crtc != 0) {
+ DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+ crtc);
+ }
+
+ /* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have
+ * a nice hardware counter that tracks the number of refreshes when
+ * the interrupt is disabled, and the kernel doesn't know the refresh
+ * rate to calculate an estimate.
+ */
+ /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}
int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
@@ -125,14 +150,22 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
MGA_WRITE(MGA_ICLEAR, ~0);
}
-void mga_driver_irq_postinstall(struct drm_device * dev)
+int mga_driver_irq_postinstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+ int ret;
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ret;
DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
- /* Turn on vertical blank interrupt and soft trap interrupt. */
- MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
+ /* Turn on soft trap interrupt. Vertical blank interrupts are enabled
+ * in mga_enable_vblank.
+ */
+ MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
+ return 0;
}
void mga_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/char/drm/r128_drv.c b/drivers/char/drm/r128_drv.c
index 6108e7587e1..2888aa01ebc 100644
--- a/drivers/char/drm/r128_drv.c
+++ b/drivers/char/drm/r128_drv.c
@@ -43,12 +43,13 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
- DRIVER_IRQ_VBL,
+ DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
.preclose = r128_driver_preclose,
.lastclose = r128_driver_lastclose,
- .vblank_wait = r128_driver_vblank_wait,
+ .get_vblank_counter = r128_get_vblank_counter,
+ .enable_vblank = r128_enable_vblank,
+ .disable_vblank = r128_disable_vblank,
.irq_preinstall = r128_driver_irq_preinstall,
.irq_postinstall = r128_driver_irq_postinstall,
.irq_uninstall = r128_driver_irq_uninstall,
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 011105e51ac..80af9e09e75 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -97,6 +97,8 @@ typedef struct drm_r128_private {
u32 crtc_offset;
u32 crtc_offset_cntl;
+ atomic_t vbl_received;
+
u32 color_fmt;
unsigned int front_offset;
unsigned int front_pitch;
@@ -149,11 +151,12 @@ extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
extern int r128_do_cleanup_cce(struct drm_device * dev);
-extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
-
+extern int r128_enable_vblank(struct drm_device *dev, int crtc);
+extern void r128_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
extern void r128_driver_irq_preinstall(struct drm_device * dev);
-extern void r128_driver_irq_postinstall(struct drm_device * dev);
+extern int r128_driver_irq_postinstall(struct drm_device * dev);
extern void r128_driver_irq_uninstall(struct drm_device * dev);
extern void r128_driver_lastclose(struct drm_device * dev);
extern void r128_driver_preclose(struct drm_device * dev,
diff --git a/drivers/char/drm/r128_irq.c b/drivers/char/drm/r128_irq.c
index c76fdca7662..5b95bd898f9 100644
--- a/drivers/char/drm/r128_irq.c
+++ b/drivers/char/drm/r128_irq.c
@@ -35,6 +35,16 @@
#include "r128_drm.h"
#include "r128_drv.h"
+u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ const drm_r128_private_t *dev_priv = dev->dev_private;
+
+ if (crtc != 0)
+ return 0;
+
+ return atomic_read(&dev_priv->vbl_received);
+}
+
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -46,30 +56,38 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
/* VBLANK interrupt */
if (status & R128_CRTC_VBLANK_INT) {
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
- atomic_inc(&dev->vbl_received);
- DRM_WAKEUP(&dev->vbl_queue);
- drm_vbl_send_signals(dev);
+ atomic_inc(&dev_priv->vbl_received);
+ drm_handle_vblank(dev, 0);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
-int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int r128_enable_vblank(struct drm_device *dev, int crtc)
{
- unsigned int cur_vblank;
- int ret = 0;
+ drm_r128_private_t *dev_priv = dev->dev_private;
- /* Assume that the user has missed the current sequence number
- * by about a day rather than she wants to wait for years
- * using vertical blanks...
- */
- DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
- (((cur_vblank = atomic_read(&dev->vbl_received))
- - *sequence) <= (1 << 23)));
+ if (crtc != 0) {
+ DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
+ return -EINVAL;
+ }
- *sequence = cur_vblank;
+ R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
+ return 0;
+}
+
+void r128_disable_vblank(struct drm_device *dev, int crtc)
+{
+ if (crtc != 0)
+ DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
- return ret;
+ /*
+ * FIXME: implement proper interrupt disable by using the vblank
+ * counter register (if available)
+ *
+ * R128_WRITE(R128_GEN_INT_CNTL,
+ * R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
+ */
}
void r128_driver_irq_preinstall(struct drm_device * dev)
@@ -82,12 +100,9 @@ void r128_driver_irq_preinstall(struct drm_device * dev)
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}
-void r128_driver_irq_postinstall(struct drm_device * dev)
+int r128_driver_irq_postinstall(struct drm_device * dev)
{
- drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
-
- /* Turn on VBL interrupt */
- R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
+ return drm_vblank_init(dev, 1);
}
void r128_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index 349ac3d3b84..a2610319624 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -59,8 +59,7 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
- DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
+ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.load = radeon_driver_load,
.firstopen = radeon_driver_firstopen,
@@ -69,8 +68,9 @@ static struct drm_driver driver = {
.postclose = radeon_driver_postclose,
.lastclose = radeon_driver_lastclose,
.unload = radeon_driver_unload,
- .vblank_wait = radeon_driver_vblank_wait,
- .vblank_wait2 = radeon_driver_vblank_wait2,
+ .get_vblank_counter = radeon_get_vblank_counter,
+ .enable_vblank = radeon_enable_vblank,
+ .disable_vblank = radeon_disable_vblank,
.dri_library_name = dri_library_name,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 173ae620223..b791420bd3d 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -304,6 +304,9 @@ typedef struct drm_radeon_private {
u32 scratch_ages[5];
+ unsigned int crtc_last_cnt;
+ unsigned int crtc2_last_cnt;
+
/* starting from here on, data is preserved accross an open */
uint32_t flags; /* see radeon_chip_flags */
unsigned long fb_aper_offset;
@@ -374,13 +377,13 @@ extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *
extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void radeon_do_release(struct drm_device * dev);
-extern int radeon_driver_vblank_wait(struct drm_device * dev,
- unsigned int *sequence);
-extern int radeon_driver_vblank_wait2(struct drm_device * dev,
- unsigned int *sequence);
+extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
+extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
+extern void radeon_do_release(struct drm_device * dev);
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
extern void radeon_driver_irq_preinstall(struct drm_device * dev);
-extern void radeon_driver_irq_postinstall(struct drm_device * dev);
+extern int radeon_driver_irq_postinstall(struct drm_device * dev);
extern void radeon_driver_irq_uninstall(struct drm_device * dev);
extern int radeon_vblank_crtc_get(struct drm_device *dev);
extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
@@ -558,6 +561,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
: RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
+#define RADEON_CRTC_CRNT_FRAME 0x0214
+#define RADEON_CRTC2_CRNT_FRAME 0x0314
+
+#define RADEON_CRTC_STATUS 0x005c
+#define RADEON_CRTC2_STATUS 0x03fc
+
#define RADEON_GEN_INT_CNTL 0x0040
# define RADEON_CRTC_VBLANK_MASK (1 << 0)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
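For reference: the CRNT_FRAME/STATUS register pairs added above back the hardware
frame counter read in radeon_irq.c below; the low STATUS bit is set while the CRTC
is inside vblank, so adding it to the frame count makes an in-progress vblank
visible immediately. With only 21 usable counter bits (max_vblank_count =
0x001fffff below), missed-vblank arithmetic must wrap; a sketch, assuming the core
computes deltas modulo the counter width:

    static u32 vblank_delta(u32 cur, u32 last)
    {
            /* 0x001fffff is 2^21 - 1, so the masked subtraction wraps */
            return (cur - last) & 0x001fffff;
    }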
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c
index 009af3814b6..507d6b747a1 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/char/drm/radeon_irq.c
@@ -35,12 +35,61 @@
#include "radeon_drm.h"
#include "radeon_drv.h"
-static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
- u32 mask)
+static void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
{
- u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if (state)
+ dev_priv->irq_enable_reg |= mask;
+ else
+ dev_priv->irq_enable_reg &= ~mask;
+
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+}
+
+int radeon_enable_vblank(struct drm_device *dev, int crtc)
+{
+ switch (crtc) {
+ case 0:
+ radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
+ break;
+ case 1:
+ radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
+ break;
+ default:
+ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+ crtc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void radeon_disable_vblank(struct drm_device *dev, int crtc)
+{
+ switch (crtc) {
+ case 0:
+ radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
+ break;
+ case 1:
+ radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
+ break;
+ default:
+ DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+ crtc);
+ break;
+ }
+}
+
+static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv)
+{
+ u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) &
+ (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
+ RADEON_CRTC2_VBLANK_STAT);
+
if (irqs)
RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
+
return irqs;
}
@@ -72,39 +121,21 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
/* Only consider the bits we're interested in - others could be used
* outside the DRM
*/
- stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
- RADEON_CRTC_VBLANK_STAT |
- RADEON_CRTC2_VBLANK_STAT));
+ stat = radeon_acknowledge_irqs(dev_priv);
if (!stat)
return IRQ_NONE;
stat &= dev_priv->irq_enable_reg;
/* SW interrupt */
- if (stat & RADEON_SW_INT_TEST) {
+ if (stat & RADEON_SW_INT_TEST)
DRM_WAKEUP(&dev_priv->swi_queue);
- }
/* VBLANK interrupt */
- if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
- int vblank_crtc = dev_priv->vblank_crtc;
-
- if ((vblank_crtc &
- (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
- (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
- if (stat & RADEON_CRTC_VBLANK_STAT)
- atomic_inc(&dev->vbl_received);
- if (stat & RADEON_CRTC2_VBLANK_STAT)
- atomic_inc(&dev->vbl_received2);
- } else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
- (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
- ((stat & RADEON_CRTC2_VBLANK_STAT) &&
- (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
- atomic_inc(&dev->vbl_received);
-
- DRM_WAKEUP(&dev->vbl_queue);
- drm_vbl_send_signals(dev);
- }
+ if (stat & RADEON_CRTC_VBLANK_STAT)
+ drm_handle_vblank(dev, 0);
+ if (stat & RADEON_CRTC2_VBLANK_STAT)
+ drm_handle_vblank(dev, 1);
return IRQ_HANDLED;
}
@@ -144,54 +175,27 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
return ret;
}
-static int radeon_driver_vblank_do_wait(struct drm_device * dev,
- unsigned int *sequence, int crtc)
+u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
{
- drm_radeon_private_t *dev_priv =
- (drm_radeon_private_t *) dev->dev_private;
- unsigned int cur_vblank;
- int ret = 0;
- int ack = 0;
- atomic_t *counter;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ u32 crtc_cnt_reg, crtc_status_reg;
+
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
- if (crtc == DRM_RADEON_VBLANK_CRTC1) {
- counter = &dev->vbl_received;
- ack |= RADEON_CRTC_VBLANK_STAT;
- } else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
- counter = &dev->vbl_received2;
- ack |= RADEON_CRTC2_VBLANK_STAT;
- } else
+ if (crtc == 0) {
+ crtc_cnt_reg = RADEON_CRTC_CRNT_FRAME;
+ crtc_status_reg = RADEON_CRTC_STATUS;
+ } else if (crtc == 1) {
+ crtc_cnt_reg = RADEON_CRTC2_CRNT_FRAME;
+ crtc_status_reg = RADEON_CRTC2_STATUS;
+ } else {
return -EINVAL;
+ }
- radeon_acknowledge_irqs(dev_priv, ack);
-
- dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
-
- /* Assume that the user has missed the current sequence number
- * by about a day rather than she wants to wait for years
- * using vertical blanks...
- */
- DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
- (((cur_vblank = atomic_read(counter))
- - *sequence) <= (1 << 23)));
-
- *sequence = cur_vblank;
-
- return ret;
-}
-
-int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
-{
- return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
-}
-
-int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
-{
- return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
+ return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1);
}
/* Needs the lock as it touches the ring.
@@ -234,21 +238,6 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
return radeon_wait_irq(dev, irqwait->irq_seq);
}
-static void radeon_enable_interrupt(struct drm_device *dev)
-{
- drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
-
- dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
- if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
- dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
-
- if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
- dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
-
- RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
- dev_priv->irq_enabled = 1;
-}
-
/* drm_dma.h hooks
*/
void radeon_driver_irq_preinstall(struct drm_device * dev)
@@ -260,20 +249,27 @@ void radeon_driver_irq_preinstall(struct drm_device * dev)
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
/* Clear bits if they're already high */
- radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
- RADEON_CRTC_VBLANK_STAT |
- RADEON_CRTC2_VBLANK_STAT));
+ radeon_acknowledge_irqs(dev_priv);
}
-void radeon_driver_irq_postinstall(struct drm_device * dev)
+int radeon_driver_irq_postinstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
+ int ret;
atomic_set(&dev_priv->swi_emitted, 0);
DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
- radeon_enable_interrupt(dev);
+ ret = drm_vblank_init(dev, 2);
+ if (ret)
+ return ret;
+
+ dev->max_vblank_count = 0x001fffff;
+
+ radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+
+ return 0;
}
void radeon_driver_irq_uninstall(struct drm_device * dev)
@@ -315,6 +311,5 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
return -EINVAL;
}
dev_priv->vblank_crtc = (unsigned int)value;
- radeon_enable_interrupt(dev);
return 0;
}
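In sketch form, radeon_irq_set_state() above keeps a software shadow
(dev_priv->irq_enable_reg) of GEN_INT_CNTL so individual interrupt sources can be
flipped without re-reading the MMIO register; the pattern in isolation, with the
register write stubbed out:

    static u32 irq_enable_shadow;

    static void write_int_cntl(u32 val) { /* MMIO write elided */ }

    static void irq_set_state(u32 mask, int on)
    {
            if (on)
                    irq_enable_shadow |= mask;
            else
                    irq_enable_shadow &= ~mask;
            write_int_cntl(irq_enable_shadow);
    }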
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index 80c01cdfa37..37870a4a3dc 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -40,11 +40,13 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
- DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+ DRIVER_IRQ_SHARED,
.load = via_driver_load,
.unload = via_driver_unload,
.context_dtor = via_final_context,
- .vblank_wait = via_driver_vblank_wait,
+ .get_vblank_counter = via_get_vblank_counter,
+ .enable_vblank = via_enable_vblank,
+ .disable_vblank = via_disable_vblank,
.irq_preinstall = via_driver_irq_preinstall,
.irq_postinstall = via_driver_irq_postinstall,
.irq_uninstall = via_driver_irq_uninstall,
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index 2daae81874c..fe67030e39a 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -75,6 +75,7 @@ typedef struct drm_via_private {
struct timeval last_vblank;
int last_vblank_valid;
unsigned usec_per_vblank;
+ atomic_t vbl_received;
drm_via_state_t hc_state;
char pci_buf[VIA_PCI_BUF_SIZE];
const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
@@ -130,11 +131,13 @@ extern int via_init_context(struct drm_device * dev, int context);
extern int via_final_context(struct drm_device * dev, int context);
extern int via_do_cleanup_map(struct drm_device * dev);
-extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
+extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int via_enable_vblank(struct drm_device *dev, int crtc);
+extern void via_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
extern void via_driver_irq_preinstall(struct drm_device * dev);
-extern void via_driver_irq_postinstall(struct drm_device * dev);
+extern int via_driver_irq_postinstall(struct drm_device * dev);
extern void via_driver_irq_uninstall(struct drm_device * dev);
extern int via_dma_cleanup(struct drm_device * dev);
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index c6bb978a110..f1ab6fc7c07 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -92,8 +92,17 @@ static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
static unsigned time_diff(struct timeval *now, struct timeval *then)
{
return (now->tv_usec >= then->tv_usec) ?
- now->tv_usec - then->tv_usec :
- 1000000 - (then->tv_usec - now->tv_usec);
+ now->tv_usec - then->tv_usec :
+ 1000000 - (then->tv_usec - now->tv_usec);
+}
+
+u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+ if (crtc != 0)
+ return 0;
+
+ return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
@@ -108,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
status = VIA_READ(VIA_REG_INTERRUPT);
if (status & VIA_IRQ_VBLANK_PENDING) {
- atomic_inc(&dev->vbl_received);
- if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
+ atomic_inc(&dev_priv->vbl_received);
+ if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
do_gettimeofday(&cur_vblank);
if (dev_priv->last_vblank_valid) {
dev_priv->usec_per_vblank =
@@ -119,12 +128,11 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
dev_priv->last_vblank = cur_vblank;
dev_priv->last_vblank_valid = 1;
}
- if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
+ if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
DRM_DEBUG("US per vblank is: %u\n",
dev_priv->usec_per_vblank);
}
- DRM_WAKEUP(&dev->vbl_queue);
- drm_vbl_send_signals(dev);
+ drm_handle_vblank(dev, 0);
handled = 1;
}
@@ -163,31 +171,34 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
}
}
-int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int via_enable_vblank(struct drm_device *dev, int crtc)
{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- unsigned int cur_vblank;
- int ret = 0;
+ drm_via_private_t *dev_priv = dev->dev_private;
+ u32 status;
- DRM_DEBUG("\n");
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
+ if (crtc != 0) {
+ DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
return -EINVAL;
}
- viadrv_acknowledge_irqs(dev_priv);
+ status = VIA_READ(VIA_REG_INTERRUPT);
+ VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
- /* Assume that the user has missed the current sequence number
- * by about a day rather than she wants to wait for years
- * using vertical blanks...
- */
+ VIA_WRITE8(0x83d4, 0x11);
+ VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
- DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
- (((cur_vblank = atomic_read(&dev->vbl_received)) -
- *sequence) <= (1 << 23)));
+ return 0;
+}
- *sequence = cur_vblank;
- return ret;
+void via_disable_vblank(struct drm_device *dev, int crtc)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ VIA_WRITE8(0x83d4, 0x11);
+ VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
+
+ if (crtc != 0)
+ DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
}
static int
@@ -292,23 +303,25 @@ void via_driver_irq_preinstall(struct drm_device * dev)
}
}
-void via_driver_irq_postinstall(struct drm_device * dev)
+int via_driver_irq_postinstall(struct drm_device * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
- DRM_DEBUG("\n");
- if (dev_priv) {
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
- | dev_priv->irq_enable_mask);
+ DRM_DEBUG("via_driver_irq_postinstall\n");
+ if (!dev_priv)
+ return -EINVAL;
- /* Some magic, oh for some data sheets ! */
+ drm_vblank_init(dev, 1);
+ status = VIA_READ(VIA_REG_INTERRUPT);
+ VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
+ | dev_priv->irq_enable_mask);
- VIA_WRITE8(0x83d4, 0x11);
- VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+ /* Some magic, oh for some data sheets ! */
+ VIA_WRITE8(0x83d4, 0x11);
+ VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
- }
+ return 0;
}
void via_driver_irq_uninstall(struct drm_device * dev)
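Unlike radeon, the VIA chip exposes no readable frame counter, so the driver
counts vblanks in software: the IRQ handler bumps dev_priv->vbl_received (now
per-device rather than the old dev-wide dev->vbl_received) and
via_get_vblank_counter() simply reports it. Reduced to its core (names
illustrative):

    static atomic_t vbl_received = ATOMIC_INIT(0);

    static void vblank_irq(void)            /* from the IRQ handler */
    {
            atomic_inc(&vbl_received);
    }

    static u32 get_vblank_counter(void)     /* reported to the DRM core */
    {
            return atomic_read(&vbl_received);
    }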
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 87532034d10..3f9e10001e1 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -1031,7 +1031,7 @@ comment "Other IDE chipsets support"
comment "Note: most of these also require special kernel boot parameters"
config BLK_DEV_4DRIVES
- bool "Generic 4 drives/port support"
+ tristate "Generic 4 drives/port support"
help
Certain older chipsets, including the Tekram 690CD, use a single set
of I/O ports at 0x1f0 to control up to four drives, instead of the
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
index ec46c44b061..713cef20622 100644
--- a/drivers/ide/arm/bast-ide.c
+++ b/drivers/ide/arm/bast-ide.c
@@ -21,6 +21,8 @@
#include <asm/arch/bast-map.h>
#include <asm/arch/bast-irq.h>
+#define DRV_NAME "bast-ide"
+
static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
{
ide_hwif_t *hwif;
@@ -33,27 +35,23 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
base += BAST_IDE_CS;
aux += BAST_IDE_CS;
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw.io_ports[i] = (unsigned long)base;
+ for (i = 0; i <= 7; i++) {
+ hw.io_ports_array[i] = (unsigned long)base;
base += 0x20;
}
- hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20);
+ hw.io_ports.ctl_addr = aux + (6 * 0x20);
hw.irq = irq;
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif == NULL)
goto out;
i = hwif->index;
- if (hwif->present)
- ide_unregister(i);
- else
- ide_init_port_data(hwif, i);
-
+ ide_init_port_data(hwif, i);
ide_init_port_hw(hwif, &hw);
- hwif->quirkproc = NULL;
+ hwif->port_ops = NULL;
idx[0] = i;
@@ -64,6 +62,8 @@ out:
static int __init bastide_init(void)
{
+ unsigned long base = BAST_VA_IDEPRI + BAST_IDE_CS;
+
/* we can treat the VR1000 and the BAST the same */
if (!(machine_is_bast() || machine_is_vr1000()))
@@ -71,6 +71,11 @@ static int __init bastide_init(void)
printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n");
+ if (!request_mem_region(base, 0x400000, DRV_NAME)) {
+ printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
+ return -EBUSY;
+ }
+
bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0);
bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1);
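The new request_mem_region() call reserves the whole BAST register window up
front, so no other driver can bind the same MMIO range; a failed probe later
would be expected to undo it with release_mem_region(). The claim-then-probe
shape in isolation (size and name as the hunk uses them):

    if (!request_mem_region(base, 0x400000, DRV_NAME))
            return -EBUSY;                  /* range already owned */
    /* ... probe hardware ...
     * on failure: release_mem_region(base, 0x400000);
     */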
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index e816b0ffcfe..124445c2092 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -191,6 +191,10 @@ static void icside_maskproc(ide_drive_t *drive, int mask)
local_irq_restore(flags);
}
+static const struct ide_port_ops icside_v6_no_dma_port_ops = {
+ .maskproc = icside_maskproc,
+};
+
#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
* SG-DMA support.
@@ -266,6 +270,11 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
}
+static const struct ide_port_ops icside_v6_port_ops = {
+ .set_dma_mode = icside_set_dma_mode,
+ .maskproc = icside_maskproc,
+};
+
static void icside_dma_host_set(ide_drive_t *drive, int on)
{
}
@@ -375,32 +384,40 @@ static void icside_dma_lost_irq(ide_drive_t *drive)
printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}
-static void icside_dma_init(ide_hwif_t *hwif)
+static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
hwif->dmatable_cpu = NULL;
hwif->dmatable_dma = 0;
- hwif->set_dma_mode = icside_set_dma_mode;
-
- hwif->dma_host_set = icside_dma_host_set;
- hwif->dma_setup = icside_dma_setup;
- hwif->dma_exec_cmd = icside_dma_exec_cmd;
- hwif->dma_start = icside_dma_start;
- hwif->ide_dma_end = icside_dma_end;
- hwif->ide_dma_test_irq = icside_dma_test_irq;
- hwif->dma_timeout = icside_dma_timeout;
- hwif->dma_lost_irq = icside_dma_lost_irq;
+
+ return 0;
}
+
+static const struct ide_dma_ops icside_v6_dma_ops = {
+ .dma_host_set = icside_dma_host_set,
+ .dma_setup = icside_dma_setup,
+ .dma_exec_cmd = icside_dma_exec_cmd,
+ .dma_start = icside_dma_start,
+ .dma_end = icside_dma_end,
+ .dma_test_irq = icside_dma_test_irq,
+ .dma_timeout = icside_dma_timeout,
+ .dma_lost_irq = icside_dma_lost_irq,
+};
#else
-#define icside_dma_init(hwif) (0)
+#define icside_v6_dma_ops NULL
#endif
+static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+ return -EOPNOTSUPP;
+}
+
static ide_hwif_t *
icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec)
{
unsigned long port = (unsigned long)base + info->dataoffset;
ide_hwif_t *hwif;
- hwif = ide_find_port(port);
+ hwif = ide_find_port();
if (hwif) {
int i;
@@ -408,15 +425,14 @@ icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *e
* Ensure we're using MMIO
*/
default_hwif_mmiops(hwif);
- hwif->mmio = 1;
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hwif->io_ports[i] = port;
+ for (i = 0; i <= 7; i++) {
+ hwif->io_ports_array[i] = port;
port += 1 << info->stepping;
}
- hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)base + info->ctrloffset;
+ hwif->io_ports.ctl_addr =
+ (unsigned long)base + info->ctrloffset;
hwif->irq = ec->irq;
- hwif->noprobe = 0;
hwif->chipset = ide_acorn;
hwif->gendev.parent = &ec->dev;
hwif->dev = &ec->dev;
@@ -462,9 +478,10 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
}
static const struct ide_port_info icside_v6_port_info __initdata = {
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
- IDE_HFLAG_NO_AUTOTUNE,
+ .init_dma = icside_dma_off_init,
+ .port_ops = &icside_v6_no_dma_port_ops,
+ .dma_ops = &icside_v6_dma_ops,
+ .host_flags = IDE_HFLAG_SERIALIZE,
.mwdma_mask = ATA_MWDMA2,
.swdma_mask = ATA_SWDMA2,
};
@@ -526,21 +543,19 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
state->hwif[0] = hwif;
state->hwif[1] = mate;
- hwif->maskproc = icside_maskproc;
hwif->hwif_data = state;
hwif->config_data = (unsigned long)ioc_base;
hwif->select_data = sel;
- mate->maskproc = icside_maskproc;
mate->hwif_data = state;
mate->config_data = (unsigned long)ioc_base;
mate->select_data = sel | 1;
if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
- icside_dma_init(hwif);
- icside_dma_init(mate);
- } else
- d.mwdma_mask = d.swdma_mask = 0;
+ d.init_dma = icside_dma_init;
+ d.port_ops = &icside_v6_port_ops;
+ d.dma_ops = NULL;
+ }
idx[0] = hwif->index;
idx[1] = mate->index;
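The icside conversion moves method pointers out of each ide_hwif_t into shared,
read-only ops tables, then picks one per configuration: ports without working DMA
get icside_v6_no_dma_port_ops, DMA-capable ones get icside_v6_port_ops plus
icside_v6_dma_ops. The selection pattern, reduced (struct layout illustrative,
not the kernel's):

    struct port_ops {
            void (*set_dma_mode)(int drive, unsigned char mode);
            void (*maskproc)(int drive, int mask);
    };

    static void mask_fn(int drive, int mask) { /* ... */ }
    static void set_dma_fn(int drive, unsigned char mode) { /* ... */ }

    static const struct port_ops no_dma_ops = { .maskproc = mask_fn };
    static const struct port_ops dma_ops = {
            .set_dma_mode = set_dma_fn,
            .maskproc     = mask_fn,
    };

    /* at probe time: d.port_ops = have_dma ? &dma_ops : &no_dma_ops; */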
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index be9ff7334c5..4263ffd4ab2 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/arm/ide_arm.c
@@ -14,6 +14,8 @@
#include <asm/mach-types.h>
#include <asm/irq.h>
+#define DRV_NAME "ide_arm"
+
#ifdef CONFIG_ARCH_CLPS7500
# include <asm/arch/hardware.h>
#
@@ -28,13 +30,27 @@ static int __init ide_arm_init(void)
{
ide_hwif_t *hwif;
hw_regs_t hw;
+ unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206;
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ if (!request_region(base, 8, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+ DRV_NAME, base, base + 7);
+ return -EBUSY;
+ }
+
+ if (!request_region(ctl, 1, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+ DRV_NAME, ctl);
+ release_region(base, 8);
+ return -EBUSY;
+ }
+
memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206);
+ ide_std_init_ports(&hw, base, ctl);
hw.irq = IDE_ARM_IRQ;
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif) {
ide_init_port_hw(hwif, &hw);
idx[0] = hwif->index;
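Note the unwind order in the new ide_arm error path: when the control-port claim
fails, the already-claimed command-port range is released before returning. With
more resources the same unwind is usually written as a goto ladder; a sketch:

    static int claim_ports(unsigned long base, unsigned long ctl)
    {
            int err = -EBUSY;

            if (!request_region(base, 8, DRV_NAME))
                    goto out;
            if (!request_region(ctl, 1, DRV_NAME))
                    goto out_release_base;
            return 0;

    out_release_base:
            release_region(base, 8);        /* undo the first claim */
    out:
            return err;
    }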
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index 420fcb78a7c..aaf32541622 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -96,11 +96,11 @@ static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
u16 val16;
/* DMA Data Setup */
- t0 = (palm_bk3710_udmatimings[mode].cycletime + ide_palm_clk - 1)
- / ide_palm_clk - 1;
- tenv = (20 + ide_palm_clk - 1) / ide_palm_clk - 1;
- trp = (palm_bk3710_udmatimings[mode].rptime + ide_palm_clk - 1)
- / ide_palm_clk - 1;
+ t0 = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].cycletime,
+ ide_palm_clk) - 1;
+ tenv = DIV_ROUND_UP(20, ide_palm_clk) - 1;
+ trp = DIV_ROUND_UP(palm_bk3710_udmatimings[mode].rptime,
+ ide_palm_clk) - 1;
/* udmatim Register */
val16 = readw(base + BK3710_UDMATIM) & (dev ? 0xFF0F : 0xFFF0);
@@ -141,8 +141,8 @@ static void palm_bk3710_setdmamode(void __iomem *base, unsigned int dev,
cycletime = max_t(int, t->cycle, min_cycle);
/* DMA Data Setup */
- t0 = (cycletime + ide_palm_clk - 1) / ide_palm_clk;
- td = (t->active + ide_palm_clk - 1) / ide_palm_clk;
+ t0 = DIV_ROUND_UP(cycletime, ide_palm_clk);
+ td = DIV_ROUND_UP(t->active, ide_palm_clk);
tkw = t0 - td - 1;
td -= 1;
@@ -168,9 +168,9 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
struct ide_timing *t;
/* PIO Data Setup */
- t0 = (cycletime + ide_palm_clk - 1) / ide_palm_clk;
- t2 = (ide_timing_find_mode(XFER_PIO_0 + mode)->active +
- ide_palm_clk - 1) / ide_palm_clk;
+ t0 = DIV_ROUND_UP(cycletime, ide_palm_clk);
+ t2 = DIV_ROUND_UP(ide_timing_find_mode(XFER_PIO_0 + mode)->active,
+ ide_palm_clk);
t2i = t0 - t2 - 1;
t2 -= 1;
@@ -192,8 +192,8 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
/* TASKFILE Setup */
t = ide_timing_find_mode(XFER_PIO_0 + mode);
- t0 = (t->cyc8b + ide_palm_clk - 1) / ide_palm_clk;
- t2 = (t->act8b + ide_palm_clk - 1) / ide_palm_clk;
+ t0 = DIV_ROUND_UP(t->cyc8b, ide_palm_clk);
+ t2 = DIV_ROUND_UP(t->act8b, ide_palm_clk);
t2i = t0 - t2 - 1;
t2 -= 1;
@@ -317,17 +317,31 @@ static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
return ATA_CBL_PATA80;
}
-static void __devinit palm_bk3710_init_hwif(ide_hwif_t *hwif)
+static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
+ const struct ide_port_info *d)
{
- hwif->set_pio_mode = palm_bk3710_set_pio_mode;
- hwif->set_dma_mode = palm_bk3710_set_dma_mode;
+ unsigned long base =
+ hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
- hwif->cable_detect = palm_bk3710_cable_detect;
+ printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
+
+ if (ide_allocate_dma_engine(hwif))
+ return -1;
+
+ ide_setup_dma(hwif, base);
+
+ return 0;
}
+static const struct ide_port_ops palm_bk3710_ports_ops = {
+ .set_pio_mode = palm_bk3710_set_pio_mode,
+ .set_dma_mode = palm_bk3710_set_dma_mode,
+ .cable_detect = palm_bk3710_cable_detect,
+};
+
static const struct ide_port_info __devinitdata palm_bk3710_port_info = {
- .init_hwif = palm_bk3710_init_hwif,
- .host_flags = IDE_HFLAG_NO_DMA, /* hack (no PCI) */
+ .init_dma = palm_bk3710_init_dma,
+ .port_ops = &palm_bk3710_ports_ops,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA4, /* (input clk 99MHz) */
.mwdma_mask = ATA_MWDMA2,
@@ -372,30 +386,24 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET;
for (i = 0; i < IDE_NR_PORTS - 2; i++)
- hw.io_ports[i] = pribase + i;
- hw.io_ports[IDE_CONTROL_OFFSET] = mem->start +
+ hw.io_ports_array[i] = pribase + i;
+ hw.io_ports.ctl_addr = mem->start +
IDE_PALM_ATA_PRI_CTL_OFFSET;
hw.irq = irq->start;
hw.chipset = ide_palm3710;
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif == NULL)
goto out;
i = hwif->index;
- if (hwif->present)
- ide_unregister(i);
- else
- ide_init_port_data(hwif, i);
-
+ ide_init_port_data(hwif, i);
ide_init_port_hw(hwif, &hw);
hwif->mmio = 1;
default_hwif_mmiops(hwif);
- ide_setup_dma(hwif, mem->start);
-
idx[0] = i;
ide_device_add(idx, &palm_bk3710_port_info);
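The open-coded (x + clk - 1) / clk expressions above all became DIV_ROUND_UP(),
the kernel's ceiling division from <linux/kernel.h>:

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* e.g. with an illustrative 15 ns IDE clock period:
     * t0 = DIV_ROUND_UP(100, 15) - 1 = 7 - 1 = 6 register ticks
     */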
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index b30adcf321c..babc1a5e128 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -17,11 +17,11 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
unsigned long port = (unsigned long)base;
int i;
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw->io_ports[i] = port;
+ for (i = 0; i <= 7; i++) {
+ hw->io_ports_array[i] = port;
port += sz;
}
- hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
+ hw->io_ports.ctl_addr = (unsigned long)ctrl;
hw->irq = irq;
}
@@ -44,7 +44,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
goto release;
}
- hwif = ide_find_port((unsigned long)base);
+ hwif = ide_find_port();
if (hwif) {
memset(&hw, 0, sizeof(hw));
rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
@@ -53,7 +53,6 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
ide_init_port_hw(hwif, &hw);
- hwif->mmio = 1;
default_hwif_mmiops(hwif);
idx[0] = hwif->index;
@@ -76,7 +75,7 @@ static void __devexit rapide_remove(struct expansion_card *ec)
ecard_set_drvdata(ec, NULL);
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
ecard_release_resources(ec);
}
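These hunks mix an indexed io_ports_array[] with named members such as
io_ports.ctl_addr on the same hw_regs_t, which suggests the two alias the same
storage: setup loops fill consecutive taskfile registers by index while the rest
of the code uses readable names. A sketch of such a layout (an assumption; the
real struct ide_io_ports has more members and unioned alternates):

    struct example_hw {
            union {
                    unsigned long io_ports_array[9];
                    struct {
                            unsigned long data_addr;     /* [0] */
                            unsigned long error_addr;    /* [1] */
                            unsigned long nsect_addr;    /* [2] */
                            unsigned long lbal_addr;     /* [3] */
                            unsigned long lbam_addr;     /* [4] */
                            unsigned long lbah_addr;     /* [5] */
                            unsigned long device_addr;   /* [6] */
                            unsigned long command_addr;  /* [7], status on read */
                            unsigned long ctl_addr;      /* [8] */
                    } io_ports;
            };
            int irq;
    };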
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 31266d27809..9df26855bc0 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -88,8 +88,8 @@ enum /* Transfer types */
int
cris_ide_ack_intr(ide_hwif_t* hwif)
{
- reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2,
- int, hwif->io_ports[0]);
+ reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
+ hwif->io_ports.data_addr);
REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel);
return 1;
}
@@ -231,7 +231,7 @@ cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir,int type,
ide_hwif_t *hwif = drive->hwif;
reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
- hwif->io_ports[IDE_DATA_OFFSET]);
+ hwif->io_ports.data_addr);
reg_ata_rw_trf_cnt trf_cnt = {0};
mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
@@ -271,7 +271,7 @@ static int cris_dma_test_irq(ide_drive_t *drive)
int intr = REG_RD_INT(ata, regi_ata, r_intr);
reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
- hwif->io_ports[IDE_DATA_OFFSET]);
+ hwif->io_ports.data_addr);
return intr & (1 << ctrl2.sel) ? 1 : 0;
}
@@ -531,7 +531,7 @@ static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int d
*R_ATA_CTRL_DATA =
cmd |
IO_FIELD(R_ATA_CTRL_DATA, data,
- drive->hwif->io_ports[IDE_DATA_OFFSET]) |
+ drive->hwif->io_ports.data_addr) |
IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
IO_STATE(R_ATA_CTRL_DATA, multi, on) |
IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
@@ -550,7 +550,7 @@ static int cris_dma_test_irq(ide_drive_t *drive)
{
int intr = *R_IRQ_MASK0_RD;
int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel,
- drive->hwif->io_ports[IDE_DATA_OFFSET]);
+ drive->hwif->io_ports.data_addr);
return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0;
}
@@ -644,7 +644,7 @@ cris_ide_inw(unsigned long reg) {
* call will also timeout on busy, but as long as the
* write is still performed, everything will be fine.
*/
- if (cris_ide_get_reg(reg) == IDE_STATUS_OFFSET)
+ if (cris_ide_get_reg(reg) == 7)
return BUSY_STAT;
else
/* For other rare cases we assume 0 is good enough. */
@@ -673,11 +673,6 @@ cris_ide_inb(unsigned long reg)
return (unsigned char)cris_ide_inw(reg);
}
-static int cris_dma_end (ide_drive_t *drive);
-static int cris_dma_setup (ide_drive_t *drive);
-static void cris_dma_exec_cmd (ide_drive_t *drive, u8 command);
-static int cris_dma_test_irq(ide_drive_t *drive);
-static void cris_dma_start(ide_drive_t *drive);
static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
@@ -770,20 +765,29 @@ static void __init cris_setup_ports(hw_regs_t *hw, unsigned long base)
memset(hw, 0, sizeof(*hw));
for (i = 0; i <= 7; i++)
- hw->io_ports[i] = base + cris_ide_reg_addr(i, 0, 1);
+ hw->io_ports_array[i] = base + cris_ide_reg_addr(i, 0, 1);
/*
* the IDE control register is at ATA address 6,
* with CS1 active instead of CS0
*/
- hw->io_ports[IDE_CONTROL_OFFSET] = base + cris_ide_reg_addr(6, 1, 0);
+ hw->io_ports.ctl_addr = base + cris_ide_reg_addr(6, 1, 0);
hw->irq = ide_default_irq(0);
hw->ack_intr = cris_ide_ack_intr;
}
+static const struct ide_port_ops cris_port_ops = {
+ .set_pio_mode = cris_set_pio_mode,
+ .set_dma_mode = cris_set_dma_mode,
+};
+
+static const struct ide_dma_ops cris_dma_ops;
+
static const struct ide_port_info cris_port_info __initdata = {
.chipset = ide_etrax100,
+ .port_ops = &cris_port_ops,
+ .dma_ops = &cris_dma_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
IDE_HFLAG_NO_DMA, /* no SFF-style DMA */
.pio_mask = ATA_PIO4,
@@ -804,24 +808,16 @@ static int __init init_e100_ide(void)
cris_setup_ports(&hw, cris_ide_base_address(h));
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif == NULL)
continue;
ide_init_port_data(hwif, hwif->index);
ide_init_port_hw(hwif, &hw);
- hwif->mmio = 1;
- hwif->set_pio_mode = &cris_set_pio_mode;
- hwif->set_dma_mode = &cris_set_dma_mode;
+
hwif->ata_input_data = &cris_ide_input_data;
hwif->ata_output_data = &cris_ide_output_data;
hwif->atapi_input_bytes = &cris_atapi_input_bytes;
hwif->atapi_output_bytes = &cris_atapi_output_bytes;
- hwif->dma_host_set = &cris_dma_host_set;
- hwif->ide_dma_end = &cris_dma_end;
- hwif->dma_setup = &cris_dma_setup;
- hwif->dma_exec_cmd = &cris_dma_exec_cmd;
- hwif->ide_dma_test_irq = &cris_dma_test_irq;
- hwif->dma_start = &cris_dma_start;
hwif->OUTB = &cris_ide_outb;
hwif->OUTW = &cris_ide_outw;
hwif->OUTBSYNC = &cris_ide_outbsync;
@@ -1076,6 +1072,15 @@ static void cris_dma_start(ide_drive_t *drive)
}
}
+static const struct ide_dma_ops cris_dma_ops = {
+ .dma_host_set = cris_dma_host_set,
+ .dma_setup = cris_dma_setup,
+ .dma_exec_cmd = cris_dma_exec_cmd,
+ .dma_start = cris_dma_start,
+ .dma_end = cris_dma_end,
+ .dma_test_irq = cris_dma_test_irq,
+};
+
module_init(init_e100_ide);
MODULE_LICENSE("GPL");
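Note the ordering trick in the cris conversion: the bare
"static const struct ide_dma_ops cris_dma_ops;" before cris_port_info lets the
table be referenced early, while the initialized definition follows the DMA
methods at the end of the file; C treats the first as a tentative definition that
the later initializer completes. Reduced to essentials:

    struct ops { void (*start)(void); };

    static const struct ops my_ops;                   /* tentative definition */

    static const struct ops *const user = &my_ops;    /* already referencable */

    static void start_fn(void) { }

    static const struct ops my_ops = { .start = start_fn };  /* completes it */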
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 4108ec4ffa7..fd23f12e17a 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -63,9 +63,9 @@ static inline void hw_setup(hw_regs_t *hw)
int i;
memset(hw, 0, sizeof(hw_regs_t));
- for (i = 0; i <= IDE_STATUS_OFFSET; i++)
- hw->io_ports[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
- hw->io_ports[IDE_CONTROL_OFFSET] = CONFIG_H8300_IDE_ALT;
+ for (i = 0; i <= 7; i++)
+ hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
+ hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
hw->chipset = ide_generic;
}
@@ -74,7 +74,6 @@ static inline void hwif_setup(ide_hwif_t *hwif)
{
default_hwif_iops(hwif);
- hwif->mmio = 1;
hwif->OUTW = mm_outw;
hwif->OUTSW = mm_outsw;
hwif->INW = mm_inw;
@@ -99,8 +98,7 @@ static int __init h8300_ide_init(void)
hw_setup(&hw);
- /* register if */
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif == NULL) {
printk(KERN_ERR "ide-h8300: IDE I/F register failed\n");
return -ENOENT;
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 0f6fb6b72dd..9d3601fa568 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -55,14 +55,22 @@ struct ide_acpi_hwif_link {
/* note: adds function name and KERN_DEBUG */
#ifdef DEBUGGING
#define DEBPRINT(fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, ## args)
+ printk(KERN_DEBUG "%s: " fmt, __func__, ## args)
#else
#define DEBPRINT(fmt, args...) do {} while (0)
#endif /* DEBUGGING */
-extern int ide_noacpi;
-extern int ide_noacpitfs;
-extern int ide_noacpionboot;
+int ide_noacpi;
+module_param_named(noacpi, ide_noacpi, bool, 0);
+MODULE_PARM_DESC(noacpi, "disable IDE ACPI support");
+
+int ide_acpigtf;
+module_param_named(acpigtf, ide_acpigtf, bool, 0);
+MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support");
+
+int ide_acpionboot;
+module_param_named(acpionboot, ide_acpionboot, bool, 0);
+MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot");
static bool ide_noacpi_psx;
static int no_acpi_psx(const struct dmi_system_id *id)
@@ -309,7 +317,7 @@ static int do_drive_get_GTF(ide_drive_t *drive,
if (ACPI_FAILURE(status)) {
printk(KERN_DEBUG
"%s: Run _GTF error: status = 0x%x\n",
- __FUNCTION__, status);
+ __func__, status);
goto out;
}
@@ -335,7 +343,7 @@ static int do_drive_get_GTF(ide_drive_t *drive,
out_obj->buffer.length % REGS_PER_GTF) {
printk(KERN_ERR
"%s: unexpected GTF length (%d) or addr (0x%p)\n",
- __FUNCTION__, out_obj->buffer.length,
+ __func__, out_obj->buffer.length,
out_obj->buffer.pointer);
err = -ENOENT;
kfree(output.pointer);
@@ -376,7 +384,7 @@ static int taskfile_load_raw(ide_drive_t *drive,
memcpy(&args.tf_array[7], &gtf->tfa, 7);
args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
- if (ide_noacpitfs) {
+ if (!ide_acpigtf) {
DEBPRINT("_GTF execution disabled\n");
return err;
}
@@ -384,7 +392,7 @@ static int taskfile_load_raw(ide_drive_t *drive,
err = ide_no_data_taskfile(drive, &args);
if (err)
printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n",
- __FUNCTION__, err);
+ __func__, err);
return err;
}
@@ -422,7 +430,7 @@ static int do_drive_set_taskfiles(ide_drive_t *drive,
if (gtf_length % REGS_PER_GTF) {
printk(KERN_ERR "%s: unexpected GTF length (%d)\n",
- __FUNCTION__, gtf_length);
+ __func__, gtf_length);
goto out;
}
@@ -547,7 +555,7 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
printk(KERN_ERR
"%s: unexpected _GTM length (0x%x)[should be 0x%zx] or "
"addr (0x%p)\n",
- __FUNCTION__, out_obj->buffer.length,
+ __func__, out_obj->buffer.length,
sizeof(struct GTM_buffer), out_obj->buffer.pointer);
return;
}
@@ -721,7 +729,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
drive->name, err);
}
- if (ide_noacpionboot) {
+ if (!ide_acpionboot) {
DEBPRINT("ACPI methods disabled on boot\n");
return;
}
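The ACPI knobs move from externs into locally defined module parameters, and two
flip polarity: the old ide_noacpitfs and ide_noacpionboot disable flags become
acpigtf and acpionboot enable flags, which is why the call sites above now test
!ide_acpigtf and !ide_acpionboot. The module_param_named() shape in isolation
(names illustrative):

    static int example_flag;        /* off unless set on the command line */
    module_param_named(flag, example_flag, bool, 0);
    MODULE_PARM_DESC(flag, "enable the example behaviour");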
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index fe5aefbf833..b34fd2bde96 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -13,8 +13,8 @@
*
* Suggestions are welcome. Patches that work are more welcome though. ;-)
* For those wishing to work on this driver, please be sure you download
- * and comply with the latest Mt. Fuji (SFF8090 version 4) and ATAPI
- * (SFF-8020i rev 2.6) standards. These documents can be obtained by
+ * and comply with the latest Mt. Fuji (SFF8090 version 4) and ATAPI
+ * (SFF-8020i rev 2.6) standards. These documents can be obtained by
* anonymous ftp from:
* ftp://fission.dt.wdc.com/pub/standards/SFF_atapi/spec/SFF8020-r2.6/PS/8020r26.ps
* ftp://ftp.avc-pioneer.com/Mtfuji4/Spec/Fuji4r10.pdf
@@ -39,19 +39,20 @@
#include <linux/mutex.h>
#include <linux/bcd.h>
-#include <scsi/scsi.h> /* For SCSI -> ATAPI command conversion */
+/* For SCSI -> ATAPI command conversion */
+#include <scsi/scsi.h>
-#include <asm/irq.h>
-#include <asm/io.h>
+#include <linux/irq.h>
+#include <linux/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include "ide-cd.h"
static DEFINE_MUTEX(idecd_ref_mutex);
-#define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref)
+#define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref)
#define ide_cd_g(disk) \
container_of((disk)->private_data, struct cdrom_info, driver)
@@ -77,19 +78,17 @@ static void ide_cd_put(struct cdrom_info *cd)
mutex_unlock(&idecd_ref_mutex);
}
-/****************************************************************************
+/*
* Generic packet command support and error handling routines.
*/
-/* Mark that we've seen a media change, and invalidate our internal
- buffers. */
-static void cdrom_saw_media_change (ide_drive_t *drive)
+/* Mark that we've seen a media change and invalidate our internal buffers. */
+static void cdrom_saw_media_change(ide_drive_t *drive)
{
struct cdrom_info *cd = drive->driver_data;
cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED;
cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID;
- cd->nsectors_buffered = 0;
}
static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
@@ -101,44 +100,43 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
return 0;
switch (sense->sense_key) {
- case NO_SENSE: case RECOVERED_ERROR:
- break;
- case NOT_READY:
- /*
- * don't care about tray state messages for
- * e.g. capacity commands or in-progress or
- * becoming ready
- */
- if (sense->asc == 0x3a || sense->asc == 0x04)
- break;
- log = 1;
- break;
- case ILLEGAL_REQUEST:
- /*
- * don't log START_STOP unit with LoEj set, since
- * we cannot reliably check if drive can auto-close
- */
- if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
- break;
- log = 1;
- break;
- case UNIT_ATTENTION:
- /*
- * Make good and sure we've seen this potential media
- * change. Some drives (i.e. Creative) fail to present
- * the correct sense key in the error register.
- */
- cdrom_saw_media_change(drive);
+ case NO_SENSE:
+ case RECOVERED_ERROR:
+ break;
+ case NOT_READY:
+ /*
+ * don't care about tray state messages for e.g. capacity
+ * commands or in-progress or becoming ready
+ */
+ if (sense->asc == 0x3a || sense->asc == 0x04)
break;
- default:
- log = 1;
+ log = 1;
+ break;
+ case ILLEGAL_REQUEST:
+ /*
+ * don't log START_STOP unit with LoEj set, since we cannot
+ * reliably check if drive can auto-close
+ */
+ if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
break;
+ log = 1;
+ break;
+ case UNIT_ATTENTION:
+ /*
+ * Make good and sure we've seen this potential media change.
+ * Some drives (e.g. Creative) fail to present the correct sense
+ * key in the error register.
+ */
+ cdrom_saw_media_change(drive);
+ break;
+ default:
+ log = 1;
+ break;
}
return log;
}
-static
-void cdrom_analyze_sense_data(ide_drive_t *drive,
+static void cdrom_analyze_sense_data(ide_drive_t *drive,
struct request *failed_command,
struct request_sense *sense)
{
@@ -151,16 +149,17 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
return;
/*
- * If a read toc is executed for a CD-R or CD-RW medium where
- * the first toc has not been recorded yet, it will fail with
- * 05/24/00 (which is a confusing error)
+ * If a read toc is executed for a CD-R or CD-RW medium where the first
+ * toc has not been recorded yet, it will fail with 05/24/00 (which is a
+ * confusing error)
*/
if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
if (sense->sense_key == 0x05 && sense->asc == 0x24)
return;
- if (sense->error_code == 0x70) { /* Current Error */
- switch(sense->sense_key) {
+ /* current error */
+ if (sense->error_code == 0x70) {
+ switch (sense->sense_key) {
case MEDIUM_ERROR:
case VOLUME_OVERFLOW:
case ILLEGAL_REQUEST:
@@ -178,25 +177,23 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
if (bio_sectors < 4)
bio_sectors = 4;
if (drive->queue->hardsect_size == 2048)
- sector <<= 2; /* Device sector size is 2K */
- sector &= ~(bio_sectors -1);
+ /* device sector size is 2K */
+ sector <<= 2;
+ sector &= ~(bio_sectors - 1);
valid = (sector - failed_command->sector) << 9;
if (valid < 0)
valid = 0;
if (sector < get_capacity(info->disk) &&
- drive->probed_capacity - sector < 4 * 75) {
+ drive->probed_capacity - sector < 4 * 75)
set_capacity(info->disk, sector);
- }
- }
- }
+ }
+ }
ide_cd_log_error(drive->name, failed_command, sense);
}
-/*
- * Initialize a ide-cd packet command request
- */
+/* Initialize a ide-cd packet command request */
void ide_cd_init_rq(ide_drive_t *drive, struct request *rq)
{
struct cdrom_info *cd = drive->driver_data;
@@ -220,7 +217,8 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
rq->data = sense;
rq->cmd[0] = GPCMD_REQUEST_SENSE;
- rq->cmd[4] = rq->data_len = 18;
+ rq->cmd[4] = 18;
+ rq->data_len = 18;
rq->cmd_type = REQ_TYPE_SENSE;
@@ -230,7 +228,7 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
(void) ide_do_drive_cmd(drive, rq, ide_preempt);
}
-static void cdrom_end_request (ide_drive_t *drive, int uptodate)
+static void cdrom_end_request(ide_drive_t *drive, int uptodate)
{
struct request *rq = HWGROUP(drive)->rq;
int nsectors = rq->hard_cur_sectors;
@@ -252,7 +250,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
}
cdrom_analyze_sense_data(drive, failed, sense);
/*
- * now end failed request
+ * now end the failed request
*/
if (blk_fs_request(failed)) {
if (ide_end_dequeued_request(drive, failed, 0,
@@ -280,21 +278,24 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
ide_end_request(drive, uptodate, nsectors);
}
-static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 stat)
+static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
{
- if (stat & 0x80)
+ if (st & 0x80)
return;
- ide_dump_status(drive, msg, stat);
+ ide_dump_status(drive, msg, st);
}
-/* Returns 0 if the request should be continued.
- Returns 1 if the request was ended. */
+/*
+ * Returns:
+ * 0: if the request should be continued.
+ * 1: if the request was ended.
+ */
static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
{
struct request *rq = HWGROUP(drive)->rq;
int stat, err, sense_key;
-
- /* Check for errors. */
+
+ /* check for errors */
stat = ide_read_status(drive);
if (stat_ret)
@@ -303,20 +304,22 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
if (OK_STAT(stat, good_stat, BAD_R_STAT))
return 0;
- /* Get the IDE error register. */
+ /* get the IDE error register */
err = ide_read_error(drive);
sense_key = err >> 4;
if (rq == NULL) {
- printk("%s: missing rq in cdrom_decode_status\n", drive->name);
+ printk(KERN_ERR "%s: missing rq in %s\n",
+ drive->name, __func__);
return 1;
}
if (blk_sense_request(rq)) {
- /* We got an error trying to get sense info
- from the drive (probably while trying
- to recover from a former error). Just give up. */
-
+ /*
+ * We got an error trying to get sense info from the drive
+ * (probably while trying to recover from a former error).
+ * Just give up.
+ */
rq->cmd_flags |= REQ_FAILED;
cdrom_end_request(drive, 0);
ide_error(drive, "request sense failure", stat);
@@ -332,28 +335,27 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
if (blk_pc_request(rq) && !rq->errors)
rq->errors = SAM_STAT_CHECK_CONDITION;
- /* Check for tray open. */
+ /* check for tray open */
if (sense_key == NOT_READY) {
- cdrom_saw_media_change (drive);
+ cdrom_saw_media_change(drive);
} else if (sense_key == UNIT_ATTENTION) {
- /* Check for media change. */
- cdrom_saw_media_change (drive);
- /*printk("%s: media changed\n",drive->name);*/
+ /* check for media change */
+ cdrom_saw_media_change(drive);
return 0;
- } else if ((sense_key == ILLEGAL_REQUEST) &&
- (rq->cmd[0] == GPCMD_START_STOP_UNIT)) {
- /*
- * Don't print error message for this condition--
- * SFF8090i indicates that 5/24/00 is the correct
- * response to a request to close the tray if the
- * drive doesn't have that capability.
- * cdrom_log_sense() knows this!
- */
+ } else if (sense_key == ILLEGAL_REQUEST &&
+ rq->cmd[0] == GPCMD_START_STOP_UNIT) {
+ /*
+ * Don't print error message for this condition--
+ * SFF8090i indicates that 5/24/00 is the correct
+ * response to a request to close the tray if the
+ * drive doesn't have that capability.
+ * cdrom_log_sense() knows this!
+ */
} else if (!(rq->cmd_flags & REQ_QUIET)) {
- /* Otherwise, print an error. */
+ /* otherwise, print an error */
ide_dump_status(drive, "packet command error", stat);
}
-
+
rq->cmd_flags |= REQ_FAILED;
/*
@@ -366,27 +368,30 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
} else if (blk_fs_request(rq)) {
int do_end_request = 0;
- /* Handle errors from READ and WRITE requests. */
+ /* handle errors from READ and WRITE requests */
if (blk_noretry_request(rq))
do_end_request = 1;
if (sense_key == NOT_READY) {
- /* Tray open. */
+ /* tray open */
if (rq_data_dir(rq) == READ) {
- cdrom_saw_media_change (drive);
+ cdrom_saw_media_change(drive);
- /* Fail the request. */
- printk ("%s: tray open\n", drive->name);
+ /* fail the request */
+ printk(KERN_ERR "%s: tray open\n", drive->name);
do_end_request = 1;
} else {
struct cdrom_info *info = drive->driver_data;
- /* allow the drive 5 seconds to recover, some
+ /*
+ * Allow the drive 5 seconds to recover, some
* devices will return this error while flushing
- * data from cache */
+ * data from cache.
+ */
if (!rq->errors)
- info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY;
+ info->write_timeout = jiffies +
+ ATAPI_WAIT_WRITE_BUSY;
rq->errors = 1;
if (time_after(jiffies, info->write_timeout))
do_end_request = 1;
@@ -394,59 +399,68 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
unsigned long flags;
/*
- * take a breather relying on the
- * unplug timer to kick us again
+ * take a breather relying on the unplug
+ * timer to kick us again
*/
spin_lock_irqsave(&ide_lock, flags);
blk_plug_device(drive->queue);
- spin_unlock_irqrestore(&ide_lock,flags);
+ spin_unlock_irqrestore(&ide_lock,
+ flags);
return 1;
}
}
} else if (sense_key == UNIT_ATTENTION) {
- /* Media change. */
- cdrom_saw_media_change (drive);
+ /* media change */
+ cdrom_saw_media_change(drive);
- /* Arrange to retry the request.
- But be sure to give up if we've retried
- too many times. */
+ /*
+ * Arrange to retry the request but be sure to give up
+ * if we've retried too many times.
+ */
if (++rq->errors > ERROR_MAX)
do_end_request = 1;
} else if (sense_key == ILLEGAL_REQUEST ||
sense_key == DATA_PROTECT) {
- /* No point in retrying after an illegal
- request or data protect error.*/
- ide_dump_status_no_sense (drive, "command error", stat);
+ /*
+ * No point in retrying after an illegal request or data
+ * protect error.
+ */
+ ide_dump_status_no_sense(drive, "command error", stat);
do_end_request = 1;
} else if (sense_key == MEDIUM_ERROR) {
- /* No point in re-trying a zillion times on a bad
- * sector... If we got here the error is not correctable */
- ide_dump_status_no_sense (drive, "media error (bad sector)", stat);
+ /*
+ * No point in re-trying a zillion times on a bad
+ * sector. If we got here the error is not correctable.
+ */
+ ide_dump_status_no_sense(drive,
+ "media error (bad sector)",
+ stat);
do_end_request = 1;
} else if (sense_key == BLANK_CHECK) {
- /* Disk appears blank ?? */
- ide_dump_status_no_sense (drive, "media error (blank)", stat);
+ /* disk appears blank ?? */
+ ide_dump_status_no_sense(drive, "media error (blank)",
+ stat);
do_end_request = 1;
} else if ((err & ~ABRT_ERR) != 0) {
- /* Go to the default handler
- for other errors. */
+ /* go to the default handler for other errors */
ide_error(drive, "cdrom_decode_status", stat);
return 1;
} else if ((++rq->errors > ERROR_MAX)) {
- /* We've racked up too many retries. Abort. */
+ /* we've racked up too many retries, abort */
do_end_request = 1;
}
- /* End a request through request sense analysis when we have
- sense data. We need this in order to perform end of media
- processing */
-
+ /*
+ * End a request through request sense analysis when we have
+ * sense data. We need this in order to perform end of media
+ * processing.
+ */
if (do_end_request)
goto end_request;
/*
- * If we got a CHECK_CONDITION status,
- * queue a request sense command.
+ * If we got a CHECK_CONDITION status, queue
+ * a request sense command.
*/
if (stat & ERR_STAT)
cdrom_queue_request_sense(drive, NULL, NULL);
@@ -455,7 +469,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
cdrom_end_request(drive, 0);
}
- /* Retry, or handle the next request. */
+ /* retry, or handle the next request */
return 1;
end_request:
@@ -480,35 +494,37 @@ static int cdrom_timer_expiry(ide_drive_t *drive)
unsigned long wait = 0;
/*
- * Some commands are *slow* and normally take a long time to
- * complete. Usually we can use the ATAPI "disconnect" to bypass
- * this, but not all commands/drives support that. Let
- * ide_timer_expiry keep polling us for these.
+ * Some commands are *slow* and normally take a long time to complete.
+ * Usually we can use the ATAPI "disconnect" to bypass this, but not all
+ * commands/drives support that. Let ide_timer_expiry keep polling us
+ * for these.
*/
switch (rq->cmd[0]) {
- case GPCMD_BLANK:
- case GPCMD_FORMAT_UNIT:
- case GPCMD_RESERVE_RZONE_TRACK:
- case GPCMD_CLOSE_TRACK:
- case GPCMD_FLUSH_CACHE:
- wait = ATAPI_WAIT_PC;
- break;
- default:
- if (!(rq->cmd_flags & REQ_QUIET))
- printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]);
- wait = 0;
- break;
+ case GPCMD_BLANK:
+ case GPCMD_FORMAT_UNIT:
+ case GPCMD_RESERVE_RZONE_TRACK:
+ case GPCMD_CLOSE_TRACK:
+ case GPCMD_FLUSH_CACHE:
+ wait = ATAPI_WAIT_PC;
+ break;
+ default:
+ if (!(rq->cmd_flags & REQ_QUIET))
+ printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n",
+ rq->cmd[0]);
+ wait = 0;
+ break;
}
return wait;
}
-/* Set up the device registers for transferring a packet command on DEV,
- expecting to later transfer XFERLEN bytes. HANDLER is the routine
- which actually transfers the command to the drive. If this is a
- drq_interrupt device, this routine will arrange for HANDLER to be
- called when the interrupt from the drive arrives. Otherwise, HANDLER
- will be called immediately after the drive is prepared for the transfer. */
-
+/*
+ * Set up the device registers for transferring a packet command on DEV,
+ * expecting to later transfer XFERLEN bytes. HANDLER is the routine
+ * which actually transfers the command to the drive. If this is a
+ * drq_interrupt device, this routine will arrange for HANDLER to be
+ * called when the interrupt from the drive arrives. Otherwise, HANDLER
+ * will be called immediately after the drive is prepared for the transfer.
+ */
static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
int xferlen,
ide_handler_t *handler)
@@ -517,15 +533,15 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
struct cdrom_info *info = drive->driver_data;
ide_hwif_t *hwif = drive->hwif;
- /* Wait for the controller to be idle. */
+ /* wait for the controller to be idle */
if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY))
return startstop;
/* FIXME: for Virtual DMA we must check harder */
if (info->dma)
- info->dma = !hwif->dma_setup(drive);
+ info->dma = !hwif->dma_ops->dma_setup(drive);
- /* Set up the controller registers. */
+ /* set up the controller registers */
ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL |
IDE_TFLAG_NO_SELECT_MASK, xferlen, info->dma);
@@ -535,7 +551,8 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
drive->waiting_for_dma = 0;
/* packet command */
- ide_execute_command(drive, WIN_PACKETCMD, handler, ATAPI_WAIT_PC, cdrom_timer_expiry);
+ ide_execute_command(drive, WIN_PACKETCMD, handler,
+ ATAPI_WAIT_PC, cdrom_timer_expiry);
return ide_started;
} else {
unsigned long flags;
@@ -543,7 +560,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
/* packet command */
spin_lock_irqsave(&ide_lock, flags);
hwif->OUTBSYNC(drive, WIN_PACKETCMD,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->io_ports.command_addr);
ndelay(400);
spin_unlock_irqrestore(&ide_lock, flags);
@@ -551,13 +568,14 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
}
}
-/* Send a packet command to DRIVE described by CMD_BUF and CMD_LEN.
- The device registers must have already been prepared
- by cdrom_start_packet_command.
- HANDLER is the interrupt handler to call when the command completes
- or there's data ready. */
+/*
+ * Send a packet command to DRIVE described by CMD_BUF and CMD_LEN. The device
+ * registers must have already been prepared by cdrom_start_packet_command.
+ * HANDLER is the interrupt handler to call when the command completes or
+ * there's data ready.
+ */
#define ATAPI_MIN_CDB_BYTES 12
-static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
+static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
struct request *rq,
ide_handler_t *handler)
{
@@ -567,24 +585,26 @@ static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
ide_startstop_t startstop;
if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) {
- /* Here we should have been called after receiving an interrupt
- from the device. DRQ should how be set. */
+ /*
+ * Here we should have been called after receiving an interrupt
+ * from the device. DRQ should now be set.
+ */
- /* Check for errors. */
+ /* check for errors */
if (cdrom_decode_status(drive, DRQ_STAT, NULL))
return ide_stopped;
- /* Ok, next interrupt will be DMA interrupt. */
+ /* ok, next interrupt will be DMA interrupt */
if (info->dma)
drive->waiting_for_dma = 1;
} else {
- /* Otherwise, we must wait for DRQ to get set. */
+ /* otherwise, we must wait for DRQ to get set */
if (ide_wait_stat(&startstop, drive, DRQ_STAT,
BUSY_STAT, WAIT_READY))
return startstop;
}
- /* Arm the interrupt handler. */
+ /* arm the interrupt handler */
ide_set_handler(drive, handler, rq->timeout, cdrom_timer_expiry);
/* ATAPI commands get padded out to 12 bytes minimum */
@@ -592,20 +612,19 @@ static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
if (cmd_len < ATAPI_MIN_CDB_BYTES)
cmd_len = ATAPI_MIN_CDB_BYTES;
- /* Send the command to the device. */
+ /* send the command to the device */
HWIF(drive)->atapi_output_bytes(drive, rq->cmd, cmd_len);
- /* Start the DMA if need be */
+ /* start the DMA if need be */
if (info->dma)
- hwif->dma_start(drive);
+ hwif->dma_ops->dma_start(drive);
return ide_started;
}
-/****************************************************************************
+/*
* Block read functions.
*/
-
static void ide_cd_pad_transfer(ide_drive_t *drive, xfer_func_t *xf, int len)
{
while (len > 0) {
@@ -626,47 +645,6 @@ static void ide_cd_drain_data(ide_drive_t *drive, int nsects)
}
/*
- * Buffer up to SECTORS_TO_TRANSFER sectors from the drive in our sector
- * buffer. Once the first sector is added, any subsequent sectors are
- * assumed to be continuous (until the buffer is cleared). For the first
- * sector added, SECTOR is its sector number. (SECTOR is then ignored until
- * the buffer is cleared.)
- */
-static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector,
- int sectors_to_transfer)
-{
- struct cdrom_info *info = drive->driver_data;
-
- /* Number of sectors to read into the buffer. */
- int sectors_to_buffer = min_t(int, sectors_to_transfer,
- (SECTOR_BUFFER_SIZE >> SECTOR_BITS) -
- info->nsectors_buffered);
-
- char *dest;
-
- /* If we couldn't get a buffer, don't try to buffer anything... */
- if (info->buffer == NULL)
- sectors_to_buffer = 0;
-
- /* If this is the first sector in the buffer, remember its number. */
- if (info->nsectors_buffered == 0)
- info->sector_buffered = sector;
-
- /* Read the data into the buffer. */
- dest = info->buffer + info->nsectors_buffered * SECTOR_SIZE;
- while (sectors_to_buffer > 0) {
- HWIF(drive)->atapi_input_bytes(drive, dest, SECTOR_SIZE);
- --sectors_to_buffer;
- --sectors_to_transfer;
- ++info->nsectors_buffered;
- dest += SECTOR_SIZE;
- }
-
- /* Throw away any remaining data. */
- ide_cd_drain_data(drive, sectors_to_transfer);
-}
-
-/*
* Check the contents of the interrupt reason register from the cdrom
* and attempt to recover if there are problems. Returns 0 if everything's
* ok; nonzero if the request has been terminated.
@@ -684,22 +662,23 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
ide_hwif_t *hwif = drive->hwif;
xfer_func_t *xf;
- /* Whoops... */
+ /* whoops... */
printk(KERN_ERR "%s: %s: wrong transfer direction!\n",
- drive->name, __FUNCTION__);
+ drive->name, __func__);
xf = rw ? hwif->atapi_output_bytes : hwif->atapi_input_bytes;
ide_cd_pad_transfer(drive, xf, len);
} else if (rw == 0 && ireason == 1) {
- /* Some drives (ASUS) seem to tell us that status
- * info is available. just get it and ignore.
+ /*
+ * Some drives (ASUS) seem to tell us that status info is
+ * available. Just get it and ignore.
*/
(void)ide_read_status(drive);
return 0;
} else {
- /* Drive wants a command packet, or invalid ireason... */
+ /* drive wants a command packet, or invalid ireason... */
printk(KERN_ERR "%s: %s: bad interrupt reason 0x%02x\n",
- drive->name, __FUNCTION__, ireason);
+ drive->name, __func__, ireason);
}
if (rq->cmd_type == REQ_TYPE_ATA_PC)
@@ -721,7 +700,7 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
return 0;
printk(KERN_ERR "%s: %s: Bad transfer size %d\n",
- drive->name, __FUNCTION__, len);
+ drive->name, __func__, len);
if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES)
printk(KERN_ERR " This drive is not supported by "
@@ -734,72 +713,13 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
return 1;
}
-/*
- * Try to satisfy some of the current read request from our cached data.
- * Returns nonzero if the request has been completed, zero otherwise.
- */
-static int cdrom_read_from_buffer (ide_drive_t *drive)
-{
- struct cdrom_info *info = drive->driver_data;
- struct request *rq = HWGROUP(drive)->rq;
- unsigned short sectors_per_frame;
-
- sectors_per_frame = queue_hardsect_size(drive->queue) >> SECTOR_BITS;
-
- /* Can't do anything if there's no buffer. */
- if (info->buffer == NULL) return 0;
-
- /* Loop while this request needs data and the next block is present
- in our cache. */
- while (rq->nr_sectors > 0 &&
- rq->sector >= info->sector_buffered &&
- rq->sector < info->sector_buffered + info->nsectors_buffered) {
- if (rq->current_nr_sectors == 0)
- cdrom_end_request(drive, 1);
-
- memcpy (rq->buffer,
- info->buffer +
- (rq->sector - info->sector_buffered) * SECTOR_SIZE,
- SECTOR_SIZE);
- rq->buffer += SECTOR_SIZE;
- --rq->current_nr_sectors;
- --rq->nr_sectors;
- ++rq->sector;
- }
-
- /* If we've satisfied the current request,
- terminate it successfully. */
- if (rq->nr_sectors == 0) {
- cdrom_end_request(drive, 1);
- return -1;
- }
-
- /* Move on to the next buffer if needed. */
- if (rq->current_nr_sectors == 0)
- cdrom_end_request(drive, 1);
-
- /* If this condition does not hold, then the kluge i use to
- represent the number of sectors to skip at the start of a transfer
- will fail. I think that this will never happen, but let's be
- paranoid and check. */
- if (rq->current_nr_sectors < bio_cur_sectors(rq->bio) &&
- (rq->sector & (sectors_per_frame - 1))) {
- printk(KERN_ERR "%s: cdrom_read_from_buffer: buffer botch (%ld)\n",
- drive->name, (long)rq->sector);
- cdrom_end_request(drive, 0);
- return -1;
- }
-
- return 0;
-}
-
static ide_startstop_t cdrom_newpc_intr(ide_drive_t *);
/*
- * Routine to send a read/write packet command to the drive.
- * This is usually called directly from cdrom_start_{read,write}().
- * However, for drq_interrupt devices, it is called from an interrupt
- * when the drive is ready to accept the command.
+ * Routine to send a read/write packet command to the drive. This is usually
+ * called directly from cdrom_start_{read,write}(). However, for drq_interrupt
+ * devices, it is called from an interrupt when the drive is ready to accept
+ * the command.
*/
static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
{
@@ -821,11 +741,11 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
* is larger than the buffer size.
*/
if (nskip > 0) {
- /* Sanity check... */
+ /* sanity check... */
if (rq->current_nr_sectors !=
bio_cur_sectors(rq->bio)) {
printk(KERN_ERR "%s: %s: buffer botch (%u)\n",
- drive->name, __FUNCTION__,
+ drive->name, __func__,
rq->current_nr_sectors);
cdrom_end_request(drive, 0);
return ide_stopped;
@@ -838,10 +758,10 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
/* the immediate bit */
rq->cmd[1] = 1 << 3;
#endif
- /* Set up the command */
+ /* set up the command */
rq->timeout = ATAPI_WAIT_PC;
- /* Send the command to the drive and return. */
+ /* send the command to the drive and return */
return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr);
}
@@ -849,7 +769,7 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
#define IDECD_SEEK_TIMER (5 * WAIT_MIN_SLEEP) /* 100 ms */
#define IDECD_SEEK_TIMEOUT (2 * WAIT_CMD) /* 20 sec */
-static ide_startstop_t cdrom_seek_intr (ide_drive_t *drive)
+static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
{
struct cdrom_info *info = drive->driver_data;
int stat;
@@ -861,19 +781,13 @@ static ide_startstop_t cdrom_seek_intr (ide_drive_t *drive)
info->cd_flags |= IDE_CD_FLAG_SEEKING;
if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
- if (--retry == 0) {
- /*
- * this condition is far too common, to bother
- * users about it
- */
- /* printk("%s: disabled DSC seek overlap\n", drive->name);*/
+ if (--retry == 0)
drive->dsc_overlap = 0;
- }
}
return ide_stopped;
}
-static ide_startstop_t cdrom_start_seek_continuation (ide_drive_t *drive)
+static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
{
struct request *rq = HWGROUP(drive)->rq;
sector_t frame = rq->sector;
@@ -888,36 +802,40 @@ static ide_startstop_t cdrom_start_seek_continuation (ide_drive_t *drive)
return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr);
}
-static ide_startstop_t cdrom_start_seek (ide_drive_t *drive, unsigned int block)
+static ide_startstop_t cdrom_start_seek(ide_drive_t *drive, unsigned int block)
{
struct cdrom_info *info = drive->driver_data;
info->dma = 0;
info->start_seek = jiffies;
- return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation);
+ return cdrom_start_packet_command(drive, 0,
+ cdrom_start_seek_continuation);
}
-/* Fix up a possibly partially-processed request so that we can
- start it over entirely, or even put it back on the request queue. */
-static void restore_request (struct request *rq)
+/*
+ * Fix up a possibly partially-processed request so that we can start it over
+ * entirely, or even put it back on the request queue.
+ */
+static void restore_request(struct request *rq)
{
if (rq->buffer != bio_data(rq->bio)) {
- sector_t n = (rq->buffer - (char *) bio_data(rq->bio)) / SECTOR_SIZE;
+ sector_t n =
+ (rq->buffer - (char *)bio_data(rq->bio)) / SECTOR_SIZE;
rq->buffer = bio_data(rq->bio);
rq->nr_sectors += n;
rq->sector -= n;
}
- rq->hard_cur_sectors = rq->current_nr_sectors = bio_cur_sectors(rq->bio);
+ rq->current_nr_sectors = bio_cur_sectors(rq->bio);
+ rq->hard_cur_sectors = rq->current_nr_sectors;
rq->hard_nr_sectors = rq->nr_sectors;
rq->hard_sector = rq->sector;
rq->q->prep_rq_fn(rq->q, rq);
}
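
restore_request() rewinds by however many whole sectors were already copied out of the current bio: the byte offset of rq->buffer into bio_data() divided by SECTOR_SIZE gives n, which is added back to the remaining count and subtracted from the start sector. A worked example with hypothetical values (SECTOR_SIZE == 512):

	/* three sectors of the current bio were already consumed */
	n = (rq->buffer - (char *)bio_data(rq->bio)) / SECTOR_SIZE; /* 1536 / 512 = 3 */
	rq->buffer = bio_data(rq->bio);	/* rewind to the bio start     */
	rq->nr_sectors += n;		/* e.g. 5 -> 8 sectors pending */
	rq->sector -= n;		/* e.g. 103 -> 100             */
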
-/****************************************************************************
- * Execute all other packet commands.
+/*
+ * All other packet commands.
*/
-
static void ide_cd_request_sense_fixup(struct request *rq)
{
/*
@@ -941,7 +859,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, struct request *rq)
if (rq->sense == NULL)
rq->sense = &sense;
- /* Start of retry loop. */
+ /* start of retry loop */
do {
int error;
unsigned long time = jiffies;
@@ -950,41 +868,45 @@ int ide_cd_queue_pc(ide_drive_t *drive, struct request *rq)
error = ide_do_drive_cmd(drive, rq, ide_wait);
time = jiffies - time;
- /* FIXME: we should probably abort/retry or something
- * in case of failure */
+ /*
+ * FIXME: we should probably abort/retry or something in case of
+ * failure.
+ */
if (rq->cmd_flags & REQ_FAILED) {
- /* The request failed. Retry if it was due to a unit
- attention status
- (usually means media was changed). */
+ /*
+ * The request failed. Retry if it was due to a unit
+ * attention status (usually means media was changed).
+ */
struct request_sense *reqbuf = rq->sense;
if (reqbuf->sense_key == UNIT_ATTENTION)
cdrom_saw_media_change(drive);
else if (reqbuf->sense_key == NOT_READY &&
reqbuf->asc == 4 && reqbuf->ascq != 4) {
- /* The drive is in the process of loading
- a disk. Retry, but wait a little to give
- the drive time to complete the load. */
+ /*
+ * The drive is in the process of loading
+ * a disk. Retry, but wait a little to give
+ * the drive time to complete the load.
+ */
ssleep(2);
} else {
- /* Otherwise, don't retry. */
+ /* otherwise, don't retry */
retries = 0;
}
--retries;
}
- /* End of retry loop. */
+ /* end of retry loop */
} while ((rq->cmd_flags & REQ_FAILED) && retries >= 0);
- /* Return an error if the command failed. */
+ /* return an error if the command failed */
return (rq->cmd_flags & REQ_FAILED) ? -EIO : 0;
}
/*
- * Called from blk_end_request_callback() after the data of the request
- * is completed and before the request is completed.
- * By returning value '1', blk_end_request_callback() returns immediately
- * without completing the request.
+ * Called from blk_end_request_callback() after the data of the request is
+ * completed and before the request itself is completed. By returning '1',
+ * blk_end_request_callback() returns immediately without completing the request.
*/
static int cdrom_newpc_intr_dummy_cb(struct request *rq)
{
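
The convention described above is the whole trick: the callback's return value decides whether blk_end_request_callback() finishes the request after its data has been completed. A minimal sketch of a callback honouring it (name hypothetical):

	/* returning 1 leaves the request pending after the data phase */
	static int keep_rq_pending_cb(struct request *rq)
	{
		return 1;
	}
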
@@ -1003,11 +925,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
unsigned int timeout;
u8 lowcyl, highcyl;
- /* Check for errors. */
+ /* check for errors */
dma = info->dma;
if (dma) {
info->dma = 0;
- dma_error = HWIF(drive)->ide_dma_end(drive);
+ dma_error = hwif->dma_ops->dma_end(drive);
if (dma_error) {
printk(KERN_ERR "%s: DMA %s error\n", drive->name,
write ? "write" : "read");
@@ -1018,9 +940,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
if (cdrom_decode_status(drive, 0, &stat))
return ide_stopped;
- /*
- * using dma, transfer is complete now
- */
+ /* using dma, transfer is complete now */
if (dma) {
if (dma_error)
return ide_error(drive, "dma error", stat);
@@ -1031,12 +951,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
goto end_request;
}
- /*
- * ok we fall to pio :/
- */
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]) & 0x3;
- lowcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
- highcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]);
+ /* ok we fall to pio :/ */
+ ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
+ lowcyl = hwif->INB(hwif->io_ports.lbam_addr);
+ highcyl = hwif->INB(hwif->io_ports.lbah_addr);
len = lowcyl + (256 * highcyl);
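
The per-interrupt ATAPI byte count is split across the LBA mid/high (old cylinder) registers, hence the low + 256 * high reassembly above. For instance, a drive handing over one 2048-byte CD frame reports (values illustrative):

	/* lowcyl = 0x00, highcyl = 0x08 */
	len = lowcyl + (256 * highcyl);		/* 0 + 256 * 8 = 2048 */
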
@@ -1044,9 +962,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
if (thislen > len)
thislen = len;
- /*
- * If DRQ is clear, the command has completed.
- */
+ /* If DRQ is clear, the command has completed. */
if ((stat & DRQ_STAT) == 0) {
if (blk_fs_request(rq)) {
/*
@@ -1057,7 +973,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
if (rq->current_nr_sectors > 0) {
printk(KERN_ERR "%s: %s: data underrun "
"(%d blocks)\n",
- drive->name, __FUNCTION__,
+ drive->name, __func__,
rq->current_nr_sectors);
if (!write)
rq->cmd_flags |= REQ_FAILED;
@@ -1067,15 +983,13 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
return ide_stopped;
} else if (!blk_pc_request(rq)) {
ide_cd_request_sense_fixup(rq);
- /* Complain if we still have data left to transfer. */
+ /* complain if we still have data left to transfer */
uptodate = rq->data_len ? 0 : 1;
}
goto end_request;
}
- /*
- * check which way to transfer data
- */
+ /* check which way to transfer data */
if (ide_cd_check_ireason(drive, rq, len, ireason, write))
return ide_stopped;
@@ -1111,16 +1025,12 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
xferfunc = HWIF(drive)->atapi_input_bytes;
}
- /*
- * transfer data
- */
+ /* transfer data */
while (thislen > 0) {
u8 *ptr = blk_fs_request(rq) ? NULL : rq->data;
int blen = rq->data_len;
- /*
- * bio backed?
- */
+ /* bio backed? */
if (rq->bio) {
if (blk_fs_request(rq)) {
ptr = rq->buffer;
@@ -1134,11 +1044,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
if (!ptr) {
if (blk_fs_request(rq) && !write)
/*
- * If the buffers are full, cache the rest
- * of the data in our internal buffer.
+ * If the buffers are full, pipe the rest into
+ * oblivion.
*/
- cdrom_buffer_sectors(drive, rq->sector,
- thislen >> 9);
+ ide_cd_drain_data(drive, thislen >> 9);
else {
printk(KERN_ERR "%s: confused, missing data\n",
drive->name);
@@ -1184,9 +1093,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
rq->sense_len += blen;
}
- /*
- * pad, if necessary
- */
+ /* pad, if necessary */
if (!blk_fs_request(rq) && len > 0)
ide_cd_pad_transfer(drive, xferfunc, len);
@@ -1230,9 +1137,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
queue_hardsect_size(drive->queue) >> SECTOR_BITS;
if (write) {
- /*
- * disk has become write protected
- */
+ /* disk has become write protected */
if (cd->disk->policy) {
cdrom_end_request(drive, 0);
return ide_stopped;
@@ -1243,15 +1148,9 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
* weirdness which might be present in the request packet.
*/
restore_request(rq);
-
- /* Satisfy whatever we can of this request from our cache. */
- if (cdrom_read_from_buffer(drive))
- return ide_stopped;
}
- /*
- * use DMA, if possible / writes *must* be hardware frame aligned
- */
+ /* use DMA, if possible / writes *must* be hardware frame aligned */
if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
(rq->sector & (sectors_per_frame - 1))) {
if (write) {
@@ -1262,13 +1161,10 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
} else
cd->dma = drive->using_dma;
- /* Clear the local sector buffer. */
- cd->nsectors_buffered = 0;
-
if (write)
cd->devinfo.media_written = 1;
- /* Start sending the read/write request to the drive. */
+ /* start sending the read/write request to the drive */
return cdrom_start_packet_command(drive, 32768, cdrom_start_rw_cont);
}
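
Since sectors_per_frame is a power of two (a 2048-byte frame holds four 512-byte sectors), the alignment test above is a simple mask of the low bits; misaligned writes are failed outright while misaligned reads merely lose DMA. A sketch with hypothetical numbers:

	unsigned short sectors_per_frame = 2048 >> 9;		/* == 4 */
	int misaligned = (rq->nr_sectors & (sectors_per_frame - 1)) ||
			 (rq->sector & (sectors_per_frame - 1));
	/* e.g. rq->sector == 102: 102 & 3 == 2 -> misaligned */
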
@@ -1293,12 +1189,11 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
info->dma = 0;
- /*
- * sg request
- */
+ /* sg request */
if (rq->bio) {
int mask = drive->queue->dma_alignment;
- unsigned long addr = (unsigned long) page_address(bio_page(rq->bio));
+ unsigned long addr =
+ (unsigned long)page_address(bio_page(rq->bio));
info->dma = drive->using_dma;
@@ -1312,15 +1207,16 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
info->dma = 0;
}
- /* Start sending the command to the drive. */
- return cdrom_start_packet_command(drive, rq->data_len, cdrom_do_newpc_cont);
+ /* start sending the command to the drive */
+ return cdrom_start_packet_command(drive, rq->data_len,
+ cdrom_do_newpc_cont);
}
-/****************************************************************************
+/*
* cdrom driver request routine.
*/
-static ide_startstop_t
-ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
+static ide_startstop_t ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq,
+ sector_t block)
{
ide_startstop_t action;
struct cdrom_info *info = drive->driver_data;
@@ -1332,16 +1228,21 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
if ((stat & SEEK_STAT) != SEEK_STAT) {
if (elapsed < IDECD_SEEK_TIMEOUT) {
- ide_stall_queue(drive, IDECD_SEEK_TIMER);
+ ide_stall_queue(drive,
+ IDECD_SEEK_TIMER);
return ide_stopped;
}
- printk (KERN_ERR "%s: DSC timeout\n", drive->name);
+ printk(KERN_ERR "%s: DSC timeout\n",
+ drive->name);
}
info->cd_flags &= ~IDE_CD_FLAG_SEEKING;
}
- if ((rq_data_dir(rq) == READ) && IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap) {
+ if (rq_data_dir(rq) == READ &&
+ IDE_LARGE_SEEK(info->last_block, block,
+ IDECD_SEEK_THRESHOLD) &&
+ drive->dsc_overlap)
action = cdrom_start_seek(drive, block);
- } else
+ else
action = cdrom_start_rw(drive, rq);
info->last_block = block;
return action;
@@ -1349,9 +1250,7 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
rq->cmd_type == REQ_TYPE_ATA_PC) {
return cdrom_do_block_pc(drive, rq);
} else if (blk_special_request(rq)) {
- /*
- * right now this can only be a reset...
- */
+ /* right now this can only be a reset... */
cdrom_end_request(drive, 1);
return ide_stopped;
}
@@ -1363,18 +1262,16 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
-/****************************************************************************
+/*
* Ioctl handling.
*
- * Routines which queue packet commands take as a final argument a pointer
- * to a request_sense struct. If execution of the command results
- * in an error with a CHECK CONDITION status, this structure will be filled
- * with the results of the subsequent request sense command. The pointer
- * can also be NULL, in which case no sense information is returned.
+ * Routines which queue packet commands take as a final argument a pointer to a
+ * request_sense struct. If execution of the command results in an error with a
+ * CHECK CONDITION status, this structure will be filled with the results of the
+ * subsequent request sense command. The pointer can also be NULL, in which case
+ * no sense information is returned.
*/
-
-static
-void msf_from_bcd (struct atapi_msf *msf)
+static void msf_from_bcd(struct atapi_msf *msf)
{
msf->minute = BCD2BIN(msf->minute);
msf->second = BCD2BIN(msf->second);
@@ -1394,8 +1291,8 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
req.cmd_flags |= REQ_QUIET;
/*
- * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to
- * switch CDs instead of supporting the LOAD_UNLOAD opcode.
+ * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to switch CDs
+ * instead of supporting the LOAD_UNLOAD opcode.
*/
req.cmd[7] = cdi->sanyo_slot % 3;
@@ -1471,36 +1368,39 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
unsigned long sectors_per_frame = SECTORS_PER_FRAME;
if (toc == NULL) {
- /* Try to allocate space. */
+ /* try to allocate space */
toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL);
if (toc == NULL) {
- printk (KERN_ERR "%s: No cdrom TOC buffer!\n", drive->name);
+ printk(KERN_ERR "%s: No cdrom TOC buffer!\n",
+ drive->name);
return -ENOMEM;
}
info->toc = toc;
}
- /* Check to see if the existing data is still valid.
- If it is, just return. */
+ /*
+ * Check to see if the existing data is still valid. If it is,
+ * just return.
+ */
(void) cdrom_check_status(drive, sense);
if (info->cd_flags & IDE_CD_FLAG_TOC_VALID)
return 0;
- /* Try to get the total cdrom capacity and sector size. */
+ /* try to get the total cdrom capacity and sector size */
stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame,
sense);
if (stat)
toc->capacity = 0x1fffff;
set_capacity(info->disk, toc->capacity * sectors_per_frame);
- /* Save a private copy of te TOC capacity for error handling */
+ /* save a private copy of the TOC capacity for error handling */
drive->probed_capacity = toc->capacity * sectors_per_frame;
blk_queue_hardsect_size(drive->queue,
sectors_per_frame << SECTOR_BITS);
- /* First read just the header, so we know how long the TOC is. */
+ /* first read just the header, so we know how long the TOC is */
stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
sizeof(struct atapi_toc_header), sense);
if (stat)
@@ -1517,7 +1417,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
if (ntracks > MAX_TRACKS)
ntracks = MAX_TRACKS;
- /* Now read the whole schmeer. */
+ /* now read the whole schmeer */
stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
(char *)&toc->hdr,
sizeof(struct atapi_toc_header) +
@@ -1525,15 +1425,18 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
sizeof(struct atapi_toc_entry), sense);
if (stat && toc->hdr.first_track > 1) {
- /* Cds with CDI tracks only don't have any TOC entries,
- despite of this the returned values are
- first_track == last_track = number of CDI tracks + 1,
- so that this case is indistinguishable from the same
- layout plus an additional audio track.
- If we get an error for the regular case, we assume
- a CDI without additional audio tracks. In this case
- the readable TOC is empty (CDI tracks are not included)
- and only holds the Leadout entry. Heiko Eißfeldt */
+ /*
+ * CDs with CDI tracks only don't have any TOC entries; despite
+ * this, the returned values are
+ * first_track == last_track = number of CDI tracks + 1,
+ * so that this case is indistinguishable from the same layout
+ * plus an additional audio track. If we get an error for the
+ * regular case, we assume a CDI without additional audio
+ * tracks. In this case the readable TOC is empty (CDI tracks
+ * are not included) and only holds the Leadout entry.
+ *
+ * Heiko Eißfeldt.
+ */
ntracks = 0;
stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
(char *)&toc->hdr,
@@ -1569,14 +1472,13 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
toc->ent[i].track = BCD2BIN(toc->ent[i].track);
msf_from_bcd(&toc->ent[i].addr.msf);
}
- toc->ent[i].addr.lba = msf_to_lba (toc->ent[i].addr.msf.minute,
- toc->ent[i].addr.msf.second,
- toc->ent[i].addr.msf.frame);
+ toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute,
+ toc->ent[i].addr.msf.second,
+ toc->ent[i].addr.msf.frame);
}
- /* Read the multisession information. */
if (toc->hdr.first_track != CDROM_LEADOUT) {
- /* Read the multisession information. */
+ /* read the multisession information */
stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
sizeof(ms_tmp), sense);
if (stat)
@@ -1584,26 +1486,27 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba);
} else {
- ms_tmp.hdr.first_track = ms_tmp.hdr.last_track = CDROM_LEADOUT;
+ ms_tmp.hdr.last_track = CDROM_LEADOUT;
+ ms_tmp.hdr.first_track = ms_tmp.hdr.last_track;
toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
}
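
msf_to_lba() is the standard CD address conversion, 75 frames per second and 60 seconds per minute minus the 150-frame (two-second) lead-in, which is why the leadout-only fallback above lands exactly on LBA 0. Assuming the usual definition from the cdrom headers:

	/* lba = ((min * 60 + sec) * 75 + frame) - 150 */
	msf_to_lba(0, 2, 0);	/* ( 0*60 + 2)*75 + 0 - 150 = 0    */
	msf_to_lba(1, 0, 0);	/* ( 1*60 + 0)*75 + 0 - 150 = 4350 */
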
if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) {
- /* Re-read multisession information using MSF format */
+ /* re-read multisession information using MSF format */
stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
sizeof(ms_tmp), sense);
if (stat)
return stat;
- msf_from_bcd (&ms_tmp.ent.addr.msf);
+ msf_from_bcd(&ms_tmp.ent.addr.msf);
toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute,
- ms_tmp.ent.addr.msf.second,
+ ms_tmp.ent.addr.msf.second,
ms_tmp.ent.addr.msf.frame);
}
toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track);
- /* Now try to get the total cdrom capacity. */
+ /* now try to get the total cdrom capacity */
stat = cdrom_get_last_written(cdi, &last_written);
if (!stat && (last_written > toc->capacity)) {
toc->capacity = last_written;
@@ -1628,7 +1531,8 @@ int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
- do { /* we seem to get stat=0x01,err=0x00 the first time (??) */
+ do {
+ /* we seem to get stat=0x01,err=0x00 the first time (??) */
stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
if (!stat)
break;
@@ -1679,7 +1583,7 @@ static struct cdrom_device_ops ide_cdrom_dops = {
.generic_packet = ide_cdrom_packet,
};
-static int ide_cdrom_register (ide_drive_t *drive, int nslots)
+static int ide_cdrom_register(ide_drive_t *drive, int nslots)
{
struct cdrom_info *info = drive->driver_data;
struct cdrom_device_info *devinfo = &info->devinfo;
@@ -1697,8 +1601,7 @@ static int ide_cdrom_register (ide_drive_t *drive, int nslots)
return register_cdrom(devinfo);
}
-static
-int ide_cdrom_probe_capabilities (ide_drive_t *drive)
+static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
{
struct cdrom_info *cd = drive->driver_data;
struct cdrom_device_info *cdi = &cd->devinfo;
@@ -1712,7 +1615,8 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
if (drive->media == ide_optical) {
cdi->mask &= ~(CDC_MO_DRIVE | CDC_RAM);
- printk(KERN_ERR "%s: ATAPI magneto-optical drive\n", drive->name);
+ printk(KERN_ERR "%s: ATAPI magneto-optical drive\n",
+ drive->name);
return nslots;
}
@@ -1723,11 +1627,10 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
}
/*
- * we have to cheat a little here. the packet will eventually
- * be queued with ide_cdrom_packet(), which extracts the
- * drive from cdi->handle. Since this device hasn't been
- * registered with the Uniform layer yet, it can't do this.
- * Same goes for cdi->ops.
+ * We have to cheat a little here. the packet will eventually be queued
+ * with ide_cdrom_packet(), which extracts the drive from cdi->handle.
+ * Since this device hasn't been registered with the Uniform layer yet,
+ * it can't do this. Same goes for cdi->ops.
*/
cdi->handle = drive;
cdi->ops = &ide_cdrom_dops;
@@ -1796,18 +1699,7 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
return nslots;
}
-#ifdef CONFIG_IDE_PROC_FS
-static void ide_cdrom_add_settings(ide_drive_t *drive)
-{
- ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
-}
-#else
-static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
-#endif
-
-/*
- * standard prep_rq_fn that builds 10 byte cmds
- */
+/* standard prep_rq_fn that builds 10 byte cmds */
static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
{
int hard_sect = queue_hardsect_size(q);
@@ -1846,9 +1738,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
{
u8 *c = rq->cmd;
- /*
- * Transform 6-byte read/write commands to the 10-byte version
- */
+ /* transform 6-byte read/write commands to the 10-byte version */
if (c[0] == READ_6 || c[0] == WRITE_6) {
c[8] = c[4];
c[5] = c[3];
@@ -1870,7 +1760,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
rq->errors = ILLEGAL_REQUEST;
return BLKPREP_KILL;
}
-
+
return BLKPREP_OK;
}
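
ide_cdrom_prep_pc() widens 6-byte READ/WRITE CDBs because ATAPI devices accept only the 10/12-byte packet forms: the 21-bit LBA shifts from bytes 1-3 into bytes 3-5, the length byte moves from byte 4 to byte 8, and adding 0x20 to the opcode turns READ_6 (0x08) into READ_10 (0x28) and WRITE_6 (0x0a) into WRITE_10 (0x2a). The hunk shows only the first assignments; the full shuffle runs along these lines:

	c[8] = c[4];		/* transfer length              */
	c[5] = c[3];		/* LBA low                      */
	c[4] = c[2];		/* LBA mid                      */
	c[3] = c[1] & 0x1f;	/* LBA high (5 bits of byte 1)  */
	c[2] = 0;		/* top LBA byte of 10-byte form */
	c[1] &= 0xe0;		/* keep only the LUN bits       */
	c[0] += (READ_10 - READ_6);	/* 0x28 - 0x08 == 0x20  */
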
@@ -1890,6 +1780,41 @@ struct cd_list_entry {
unsigned int cd_flags;
};
+#ifdef CONFIG_IDE_PROC_FS
+static sector_t ide_cdrom_capacity(ide_drive_t *drive)
+{
+ unsigned long capacity, sectors_per_frame;
+
+ if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
+ return 0;
+
+ return capacity * sectors_per_frame;
+}
+
+static int proc_idecd_read_capacity(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ ide_drive_t *drive = data;
+ int len;
+
+ len = sprintf(page, "%llu\n", (long long)ide_cdrom_capacity(drive));
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static ide_proc_entry_t idecd_proc[] = {
+ { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
+ { NULL, 0, NULL, NULL }
+};
+
+static void ide_cdrom_add_settings(ide_drive_t *drive)
+{
+ ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1,
+ &drive->dsc_overlap, NULL);
+}
+#else
+static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
+#endif
+
static const struct cd_list_entry ide_cd_quirks_list[] = {
/* Limit transfer size per interrupt. */
{ "SAMSUNG CD-ROM SCR-2430", NULL, IDE_CD_FLAG_LIMIT_NFRAMES },
@@ -1947,8 +1872,7 @@ static unsigned int ide_cd_flags(struct hd_driveid *id)
return 0;
}
-static
-int ide_cdrom_setup (ide_drive_t *drive)
+static int ide_cdrom_setup(ide_drive_t *drive)
{
struct cdrom_info *cd = drive->driver_data;
struct cdrom_device_info *cdi = &cd->devinfo;
@@ -1977,21 +1901,19 @@ int ide_cdrom_setup (ide_drive_t *drive)
id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD;
else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD)
- cdi->sanyo_slot = 3; /* 3 => use CD in slot 0 */
+ /* 3 => use CD in slot 0 */
+ cdi->sanyo_slot = 3;
- nslots = ide_cdrom_probe_capabilities (drive);
+ nslots = ide_cdrom_probe_capabilities(drive);
- /*
- * set correct block size
- */
+ /* set correct block size */
blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
- if (drive->autotune == IDE_TUNE_DEFAULT ||
- drive->autotune == IDE_TUNE_AUTO)
- drive->dsc_overlap = (drive->next != drive);
+ drive->dsc_overlap = (drive->next != drive);
if (ide_cdrom_register(drive, nslots)) {
- printk (KERN_ERR "%s: ide_cdrom_setup failed to register device with the cdrom driver.\n", drive->name);
+ printk(KERN_ERR "%s: %s failed to register device with the"
+ " cdrom driver.\n", drive->name, __func__);
cd->devinfo.handle = NULL;
return 1;
}
@@ -1999,19 +1921,6 @@ int ide_cdrom_setup (ide_drive_t *drive)
return 0;
}
-#ifdef CONFIG_IDE_PROC_FS
-static
-sector_t ide_cdrom_capacity (ide_drive_t *drive)
-{
- unsigned long capacity, sectors_per_frame;
-
- if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
- return 0;
-
- return capacity * sectors_per_frame;
-}
-#endif
-
static void ide_cd_remove(ide_drive_t *drive)
{
struct cdrom_info *info = drive->driver_data;
@@ -2030,7 +1939,6 @@ static void ide_cd_release(struct kref *kref)
ide_drive_t *drive = info->drive;
struct gendisk *g = info->disk;
- kfree(info->buffer);
kfree(info->toc);
if (devinfo->handle == drive)
unregister_cdrom(devinfo);
@@ -2044,23 +1952,6 @@ static void ide_cd_release(struct kref *kref)
static int ide_cd_probe(ide_drive_t *);
-#ifdef CONFIG_IDE_PROC_FS
-static int proc_idecd_read_capacity
- (char *page, char **start, off_t off, int count, int *eof, void *data)
-{
- ide_drive_t *drive = data;
- int len;
-
- len = sprintf(page,"%llu\n", (long long)ide_cdrom_capacity(drive));
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
-}
-
-static ide_proc_entry_t idecd_proc[] = {
- { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
- { NULL, 0, NULL, NULL }
-};
-#endif
-
static ide_driver_t ide_cdrom_driver = {
.gen_driver = {
.owner = THIS_MODULE,
@@ -2081,20 +1972,17 @@ static ide_driver_t ide_cdrom_driver = {
#endif
};
-static int idecd_open(struct inode * inode, struct file * file)
+static int idecd_open(struct inode *inode, struct file *file)
{
struct gendisk *disk = inode->i_bdev->bd_disk;
struct cdrom_info *info;
int rc = -ENOMEM;
- if (!(info = ide_cd_get(disk)))
+ info = ide_cd_get(disk);
+ if (!info)
return -ENXIO;
- if (!info->buffer)
- info->buffer = kmalloc(SECTOR_BUFFER_SIZE, GFP_KERNEL|__GFP_REPEAT);
-
- if (info->buffer)
- rc = cdrom_open(&info->devinfo, inode, file);
+ rc = cdrom_open(&info->devinfo, inode, file);
if (rc < 0)
ide_cd_put(info);
@@ -2102,12 +1990,12 @@ static int idecd_open(struct inode * inode, struct file * file)
return rc;
}
-static int idecd_release(struct inode * inode, struct file * file)
+static int idecd_release(struct inode *inode, struct file *file)
{
struct gendisk *disk = inode->i_bdev->bd_disk;
struct cdrom_info *info = ide_cd_g(disk);
- cdrom_release (&info->devinfo, file);
+ cdrom_release(&info->devinfo, file);
ide_cd_put(info);
@@ -2139,7 +2027,7 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
struct packet_command cgc;
char buffer[16];
int stat;
- char spindown;
+ char spindown;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
@@ -2148,12 +2036,12 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
return stat;
spindown = buffer[11] & 0x0f;
- if (copy_to_user((void __user *)arg, &spindown, sizeof (char)))
+ if (copy_to_user((void __user *)arg, &spindown, sizeof(char)))
return -EFAULT;
return 0;
}
-static int idecd_ioctl (struct inode *inode, struct file *file,
+static int idecd_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct block_device *bdev = inode->i_bdev;
@@ -2161,13 +2049,13 @@ static int idecd_ioctl (struct inode *inode, struct file *file,
int err;
switch (cmd) {
- case CDROMSETSPINDOWN:
+ case CDROMSETSPINDOWN:
return idecd_set_spindown(&info->devinfo, arg);
- case CDROMGETSPINDOWN:
+ case CDROMGETSPINDOWN:
return idecd_get_spindown(&info->devinfo, arg);
default:
break;
- }
+ }
err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg);
if (err == -EINVAL)
@@ -2193,16 +2081,16 @@ static int idecd_revalidate_disk(struct gendisk *disk)
}
static struct block_device_operations idecd_ops = {
- .owner = THIS_MODULE,
- .open = idecd_open,
- .release = idecd_release,
- .ioctl = idecd_ioctl,
- .media_changed = idecd_media_changed,
- .revalidate_disk= idecd_revalidate_disk
+ .owner = THIS_MODULE,
+ .open = idecd_open,
+ .release = idecd_release,
+ .ioctl = idecd_ioctl,
+ .media_changed = idecd_media_changed,
+ .revalidate_disk = idecd_revalidate_disk
};
-/* options */
-static char *ignore = NULL;
+/* module options */
+static char *ignore;
module_param(ignore, charp, 0400);
MODULE_DESCRIPTION("ATAPI CD-ROM Driver");
@@ -2222,17 +2110,20 @@ static int ide_cd_probe(ide_drive_t *drive)
/* skip drives that we were told to ignore */
if (ignore != NULL) {
if (strstr(ignore, drive->name)) {
- printk(KERN_INFO "ide-cd: ignoring drive %s\n", drive->name);
+ printk(KERN_INFO "ide-cd: ignoring drive %s\n",
+ drive->name);
goto failed;
}
}
if (drive->scsi) {
- printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi emulation.\n", drive->name);
+ printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi "
+ "emulation.\n", drive->name);
goto failed;
}
info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL);
if (info == NULL) {
- printk(KERN_ERR "%s: Can't allocate a cdrom structure\n", drive->name);
+ printk(KERN_ERR "%s: Can't allocate a cdrom structure\n",
+ drive->name);
goto failed;
}
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 22e3751a681..a58801c4484 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -119,10 +119,6 @@ struct cdrom_info {
struct atapi_toc *toc;
- unsigned long sector_buffered;
- unsigned long nsectors_buffered;
- unsigned char *buffer;
-
/* The result of the last successful request sense command
on this device. */
struct request_sense sense_data;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 39501d13025..8e08d083fce 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -16,8 +16,6 @@
#define IDEDISK_VERSION "1.18"
-//#define DEBUG
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -88,7 +86,7 @@ static void ide_disk_put(struct ide_disk_obj *idkp)
*
* It is called only once for each drive.
*/
-static int lba_capacity_is_ok (struct hd_driveid *id)
+static int lba_capacity_is_ok(struct hd_driveid *id)
{
unsigned long lba_sects, chs_sects, head, tail;
@@ -176,7 +174,8 @@ static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
* __ide_do_rw_disk() issues READ and WRITE commands to a disk,
* using LBA if supported, or CHS otherwise, to address sectors.
*/
-static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, sector_t block)
+static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+ sector_t block)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned int dma = drive->using_dma;
@@ -228,7 +227,8 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
tf->device = (block >> 8) & 0xf;
}
} else {
- unsigned int sect,head,cyl,track;
+ unsigned int sect, head, cyl, track;
+
track = (int)block / drive->sect;
sect = (int)block % drive->sect + 1;
head = track % drive->head;
@@ -271,7 +271,8 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
* 1073741822 == 549756 MB or 48bit addressing fake drive
*/
-static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector_t block)
+static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+ sector_t block)
{
ide_hwif_t *hwif = HWIF(drive);
@@ -452,7 +453,7 @@ static void idedisk_check_hpa(ide_drive_t *drive)
* in above order (i.e., if value of higher priority is available,
* reset will be ignored).
*/
-static void init_idedisk_capacity (ide_drive_t *drive)
+static void init_idedisk_capacity(ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
/*
@@ -479,7 +480,7 @@ static void init_idedisk_capacity (ide_drive_t *drive)
}
}
-static sector_t idedisk_capacity (ide_drive_t *drive)
+static sector_t idedisk_capacity(ide_drive_t *drive)
{
return drive->capacity64 - drive->sect0;
}
@@ -524,10 +525,11 @@ static int proc_idedisk_read_cache
int len;
if (drive->id_read)
- len = sprintf(out,"%i\n", drive->id->buf_size / 2);
+ len = sprintf(out, "%i\n", drive->id->buf_size / 2);
else
- len = sprintf(out,"(none)\n");
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ len = sprintf(out, "(none)\n");
+
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
static int proc_idedisk_read_capacity
@@ -536,54 +538,52 @@ static int proc_idedisk_read_capacity
ide_drive_t*drive = (ide_drive_t *)data;
int len;
- len = sprintf(page,"%llu\n", (long long)idedisk_capacity(drive));
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ len = sprintf(page, "%llu\n", (long long)idedisk_capacity(drive));
+
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
-static int proc_idedisk_read_smart_thresholds
- (char *page, char **start, off_t off, int count, int *eof, void *data)
+static int proc_idedisk_read_smart(char *page, char **start, off_t off,
+ int count, int *eof, void *data, u8 sub_cmd)
{
ide_drive_t *drive = (ide_drive_t *)data;
int len = 0, i = 0;
- if (get_smart_data(drive, page, SMART_READ_THRESHOLDS) == 0) {
+ if (get_smart_data(drive, page, sub_cmd) == 0) {
unsigned short *val = (unsigned short *) page;
char *out = ((char *)val) + (SECTOR_WORDS * 4);
page = out;
do {
- out += sprintf(out, "%04x%c", le16_to_cpu(*val), (++i & 7) ? ' ' : '\n');
+ out += sprintf(out, "%04x%c", le16_to_cpu(*val),
+ (++i & 7) ? ' ' : '\n');
val += 1;
} while (i < (SECTOR_WORDS * 2));
len = out - page;
}
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
-static int proc_idedisk_read_smart_values
+static int proc_idedisk_read_sv
(char *page, char **start, off_t off, int count, int *eof, void *data)
{
- ide_drive_t *drive = (ide_drive_t *)data;
- int len = 0, i = 0;
+ return proc_idedisk_read_smart(page, start, off, count, eof, data,
+ SMART_READ_VALUES);
+}
- if (get_smart_data(drive, page, SMART_READ_VALUES) == 0) {
- unsigned short *val = (unsigned short *) page;
- char *out = ((char *)val) + (SECTOR_WORDS * 4);
- page = out;
- do {
- out += sprintf(out, "%04x%c", le16_to_cpu(*val), (++i & 7) ? ' ' : '\n');
- val += 1;
- } while (i < (SECTOR_WORDS * 2));
- len = out - page;
- }
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+static int proc_idedisk_read_st
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ return proc_idedisk_read_smart(page, start, off, count, eof, data,
+ SMART_READ_THRESHOLDS);
}
static ide_proc_entry_t idedisk_proc[] = {
- { "cache", S_IFREG|S_IRUGO, proc_idedisk_read_cache, NULL },
- { "capacity", S_IFREG|S_IRUGO, proc_idedisk_read_capacity, NULL },
- { "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL },
- { "smart_values", S_IFREG|S_IRUSR, proc_idedisk_read_smart_values, NULL },
- { "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_smart_thresholds, NULL },
+ { "cache", S_IFREG|S_IRUGO, proc_idedisk_read_cache, NULL },
+ { "capacity", S_IFREG|S_IRUGO, proc_idedisk_read_capacity, NULL },
+ { "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL },
+ { "smart_values", S_IFREG|S_IRUSR, proc_idedisk_read_sv, NULL },
+ { "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_st, NULL },
{ NULL, 0, NULL, NULL }
};
#endif /* CONFIG_IDE_PROC_FS */
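
The SMART handlers above illustrate the usual de-duplication move: the shared body becomes one helper parameterized by the SMART sub-command, and the proc entries keep their old names as one-line adapters. Reduced to its shape (argument lists abbreviated, constants from hdreg.h):

	static int read_smart(char *page, u8 sub_cmd);	/* shared body */

	static int read_sv(char *page)
	{
		return read_smart(page, SMART_READ_VALUES);	/* 0xd0 */
	}

	static int read_st(char *page)
	{
		return read_smart(page, SMART_READ_THRESHOLDS);	/* 0xd1 */
	}
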
@@ -625,12 +625,13 @@ static int set_multcount(ide_drive_t *drive, int arg)
if (drive->special.b.set_multmode)
return -EBUSY;
- ide_init_drive_cmd (&rq);
+ ide_init_drive_cmd(&rq);
rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
drive->mult_req = arg;
drive->special.b.set_multmode = 1;
- (void) ide_do_drive_cmd (drive, &rq, ide_wait);
+ (void)ide_do_drive_cmd(drive, &rq, ide_wait);
+
return (drive->mult_count == arg) ? 0 : -EIO;
}
@@ -706,7 +707,7 @@ static int write_cache(ide_drive_t *drive, int arg)
return err;
}
-static int do_idedisk_flushcache (ide_drive_t *drive)
+static int do_idedisk_flushcache(ide_drive_t *drive)
{
ide_task_t args;
@@ -719,7 +720,7 @@ static int do_idedisk_flushcache (ide_drive_t *drive)
return ide_no_data_taskfile(drive, &args);
}
-static int set_acoustic (ide_drive_t *drive, int arg)
+static int set_acoustic(ide_drive_t *drive, int arg)
{
ide_task_t args;
@@ -753,7 +754,7 @@ static int set_lba_addressing(ide_drive_t *drive, int arg)
return 0;
if (!idedisk_supports_lba48(drive->id))
- return -EIO;
+ return -EIO;
drive->addressing = arg;
return 0;
}
@@ -763,23 +764,35 @@ static void idedisk_add_settings(ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
- ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->bios_cyl, NULL);
- ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
- ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
- ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1, &drive->addressing, set_lba_addressing);
- ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0, id->max_multsect, 1, 1, &drive->mult_count, set_multcount);
- ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr);
- ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL);
- ide_add_setting(drive, "wcache", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->wcache, write_cache);
- ide_add_setting(drive, "acoustic", SETTING_RW, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic);
- ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL);
- ide_add_setting(drive, "max_failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL);
+ ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 65535, 1, 1,
+ &drive->bios_cyl, NULL);
+ ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1,
+ &drive->bios_head, NULL);
+ ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1,
+ &drive->bios_sect, NULL);
+ ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1,
+ &drive->addressing, set_lba_addressing);
+ ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0,
+ id->max_multsect, 1, 1, &drive->mult_count,
+ set_multcount);
+ ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1,
+ &drive->nowerr, set_nowerr);
+ ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1,
+ &drive->lun, NULL);
+ ide_add_setting(drive, "wcache", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1,
+ &drive->wcache, write_cache);
+ ide_add_setting(drive, "acoustic", SETTING_RW, TYPE_BYTE, 0, 254, 1, 1,
+ &drive->acoustic, set_acoustic);
+ ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1,
+ &drive->failures, NULL);
+ ide_add_setting(drive, "max_failures", SETTING_RW, TYPE_INT, 0, 65535,
+ 1, 1, &drive->max_failures, NULL);
}
#else
static inline void idedisk_add_settings(ide_drive_t *drive) { ; }
#endif
-static void idedisk_setup (ide_drive_t *drive)
+static void idedisk_setup(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct hd_driveid *id = drive->id;
@@ -792,11 +805,10 @@ static void idedisk_setup (ide_drive_t *drive)
if (drive->removable) {
/*
- * Removable disks (eg. SYQUEST); ignore 'WD' drives
+ * Removable disks (eg. SYQUEST); ignore 'WD' drives
*/
- if (id->model[0] != 'W' || id->model[1] != 'D') {
+ if (id->model[0] != 'W' || id->model[1] != 'D')
drive->doorlocking = 1;
- }
}
(void)set_lba_addressing(drive, 1);
@@ -810,10 +822,11 @@ static void idedisk_setup (ide_drive_t *drive)
blk_queue_max_sectors(drive->queue, max_s);
}
- printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name, drive->queue->max_sectors / 2);
+ printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
+ drive->queue->max_sectors / 2);
/* calculate drive capacity, and select LBA if possible */
- init_idedisk_capacity (drive);
+ init_idedisk_capacity(drive);
/* limit drive capacity to 137GB if LBA48 cannot be used */
if (drive->addressing == 0 && drive->capacity64 > 1ULL << 28) {
@@ -826,9 +839,9 @@ static void idedisk_setup (ide_drive_t *drive)
if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && drive->addressing) {
if (drive->capacity64 > 1ULL << 28) {
- printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode will"
- " be used for accessing sectors > %u\n",
- drive->name, 1 << 28);
+ printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
+ " will be used for accessing sectors "
+ "> %u\n", drive->name, 1 << 28);
} else
drive->addressing = 0;
}
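
The "137GB" ceiling enforced in this function is just LBA28 arithmetic: 2^28 addressable sectors of 512 bytes each.

	/* (1ULL << 28) * 512 = 137,438,953,472 bytes (~137.4 GB, 128 GiB) */
	unsigned long long lba28_limit = (1ULL << 28) * 512;
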
@@ -837,7 +850,8 @@ static void idedisk_setup (ide_drive_t *drive)
* if possible, give fdisk access to more of the drive,
* by correcting bios_cyls:
*/
- capacity = idedisk_capacity (drive);
+ capacity = idedisk_capacity(drive);
+
if (!drive->forced_geom) {
if (idedisk_supports_lba48(drive->id)) {
@@ -993,7 +1007,8 @@ static int idedisk_open(struct inode *inode, struct file *filp)
struct ide_disk_obj *idkp;
ide_drive_t *drive;
- if (!(idkp = ide_disk_get(disk)))
+ idkp = ide_disk_get(disk);
+ if (idkp == NULL)
return -ENXIO;
drive = idkp->drive;
@@ -1115,13 +1130,13 @@ static int idedisk_revalidate_disk(struct gendisk *disk)
}
static struct block_device_operations idedisk_ops = {
- .owner = THIS_MODULE,
- .open = idedisk_open,
- .release = idedisk_release,
- .ioctl = idedisk_ioctl,
- .getgeo = idedisk_getgeo,
- .media_changed = idedisk_media_changed,
- .revalidate_disk= idedisk_revalidate_disk
+ .owner = THIS_MODULE,
+ .open = idedisk_open,
+ .release = idedisk_release,
+ .ioctl = idedisk_ioctl,
+ .getgeo = idedisk_getgeo,
+ .media_changed = idedisk_media_changed,
+ .revalidate_disk = idedisk_revalidate_disk
};
MODULE_DESCRIPTION("ATA DISK Driver");
@@ -1184,7 +1199,7 @@ failed:
return -ENODEV;
}
-static void __exit idedisk_exit (void)
+static void __exit idedisk_exit(void)
{
driver_unregister(&idedisk_driver.gen_driver);
}
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index d61e5788d31..c352cf27b6e 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -102,7 +102,7 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
{
u8 stat = 0, dma_stat = 0;
- dma_stat = HWIF(drive)->ide_dma_end(drive);
+ dma_stat = drive->hwif->dma_ops->dma_end(drive);
stat = ide_read_status(drive);
if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
@@ -394,7 +394,7 @@ void ide_dma_off_quietly(ide_drive_t *drive)
drive->using_dma = 0;
ide_toggle_bounce(drive, 0);
- drive->hwif->dma_host_set(drive, 0);
+ drive->hwif->dma_ops->dma_host_set(drive, 0);
}
EXPORT_SYMBOL(ide_dma_off_quietly);
@@ -427,7 +427,7 @@ void ide_dma_on(ide_drive_t *drive)
drive->using_dma = 1;
ide_toggle_bounce(drive, 1);
- drive->hwif->dma_host_set(drive, 1);
+ drive->hwif->dma_ops->dma_host_set(drive, 1);
}
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
@@ -482,11 +482,12 @@ int ide_dma_setup(ide_drive_t *drive)
EXPORT_SYMBOL_GPL(ide_dma_setup);
-static void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
+void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
/* issue cmd to drive */
ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
}
+EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
void ide_dma_start(ide_drive_t *drive)
{
@@ -532,7 +533,7 @@ int __ide_dma_end (ide_drive_t *drive)
EXPORT_SYMBOL(__ide_dma_end);
/* returns 1 if dma irq issued, 0 otherwise */
-static int __ide_dma_test_irq(ide_drive_t *drive)
+int ide_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
u8 dma_stat = hwif->INB(hwif->dma_status);
@@ -542,9 +543,10 @@ static int __ide_dma_test_irq(ide_drive_t *drive)
return 1;
if (!drive->waiting_for_dma)
printk(KERN_WARNING "%s: (%s) called while not waiting\n",
- drive->name, __FUNCTION__);
+ drive->name, __func__);
return 0;
}
+EXPORT_SYMBOL_GPL(ide_dma_test_irq);
#else
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
@@ -574,6 +576,7 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
struct hd_driveid *id = drive->id;
ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
unsigned int mask = 0;
switch(base) {
@@ -581,8 +584,8 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
if ((id->field_valid & 4) == 0)
break;
- if (hwif->udma_filter)
- mask = hwif->udma_filter(drive);
+ if (port_ops && port_ops->udma_filter)
+ mask = port_ops->udma_filter(drive);
else
mask = hwif->ultra_mask;
mask &= id->dma_ultra;
@@ -598,8 +601,8 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
case XFER_MW_DMA_0:
if ((id->field_valid & 2) == 0)
break;
- if (hwif->mdma_filter)
- mask = hwif->mdma_filter(drive);
+ if (port_ops && port_ops->mdma_filter)
+ mask = port_ops->mdma_filter(drive);
else
mask = hwif->mwdma_mask;
mask &= id->dma_mword;
@@ -703,17 +706,8 @@ static int ide_tune_dma(ide_drive_t *drive)
speed = ide_max_dma_mode(drive);
- if (!speed) {
- /* is this really correct/needed? */
- if ((hwif->host_flags & IDE_HFLAG_CY82C693) &&
- ide_dma_good_drive(drive))
- return 1;
- else
- return 0;
- }
-
- if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
- return 1;
+ if (!speed)
+ return 0;
if (ide_set_dma_mode(drive, speed))
return 0;
@@ -810,15 +804,15 @@ void ide_dma_timeout (ide_drive_t *drive)
printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
- if (hwif->ide_dma_test_irq(drive))
+ if (hwif->dma_ops->dma_test_irq(drive))
return;
- hwif->ide_dma_end(drive);
+ hwif->dma_ops->dma_end(drive);
}
EXPORT_SYMBOL(ide_dma_timeout);
-static void ide_release_dma_engine(ide_hwif_t *hwif)
+void ide_release_dma_engine(ide_hwif_t *hwif)
{
if (hwif->dmatable_cpu) {
struct pci_dev *pdev = to_pci_dev(hwif->dev);
@@ -829,28 +823,7 @@ static void ide_release_dma_engine(ide_hwif_t *hwif)
}
}
-static int ide_release_iomio_dma(ide_hwif_t *hwif)
-{
- release_region(hwif->dma_base, 8);
- if (hwif->extra_ports)
- release_region(hwif->extra_base, hwif->extra_ports);
- return 1;
-}
-
-/*
- * Needed for allowing full modular support of ide-driver
- */
-int ide_release_dma(ide_hwif_t *hwif)
-{
- ide_release_dma_engine(hwif);
-
- if (hwif->mmio)
- return 1;
- else
- return ide_release_iomio_dma(hwif);
-}
-
-static int ide_allocate_dma_engine(ide_hwif_t *hwif)
+int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
struct pci_dev *pdev = to_pci_dev(hwif->dev);
@@ -862,65 +835,25 @@ static int ide_allocate_dma_engine(ide_hwif_t *hwif)
return 0;
printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
- hwif->cds->name);
+ hwif->name);
return 1;
}
-
-static int ide_mapped_mmio_dma(ide_hwif_t *hwif, unsigned long base)
-{
- printk(KERN_INFO " %s: MMIO-DMA ", hwif->name);
-
- return 0;
-}
-
-static int ide_iomio_dma(ide_hwif_t *hwif, unsigned long base)
-{
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx",
- hwif->name, base, base + 7);
-
- if (!request_region(base, 8, hwif->name)) {
- printk(" -- Error, ports in use.\n");
- return 1;
- }
-
- if (hwif->cds->extra) {
- hwif->extra_base = base + (hwif->channel ? 8 : 16);
-
- if (!hwif->mate || !hwif->mate->extra_ports) {
- if (!request_region(hwif->extra_base,
- hwif->cds->extra, hwif->cds->name)) {
- printk(" -- Error, extra ports in use.\n");
- release_region(base, 8);
- return 1;
- }
- hwif->extra_ports = hwif->cds->extra;
- }
- }
-
- return 0;
-}
-
-static int ide_dma_iobase(ide_hwif_t *hwif, unsigned long base)
-{
- if (hwif->mmio)
- return ide_mapped_mmio_dma(hwif, base);
-
- return ide_iomio_dma(hwif, base);
-}
+EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
+
+static const struct ide_dma_ops sff_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = __ide_dma_end,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_timeout = ide_dma_timeout,
+ .dma_lost_irq = ide_dma_lost_irq,
+};
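
sff_dma_ops collects the hooks that ide_setup_dma() used to patch into the hwif one by one. A host driver with one quirky operation now supplies a whole table rather than overriding a single pointer; a hedged sketch (my_dma_end and the driver context are hypothetical, the other entries reuse the generic helpers exported above):

	static int my_dma_end(ide_drive_t *drive)
	{
		/* chip-specific cleanup would go here */
		return __ide_dma_end(drive);
	}

	static const struct ide_dma_ops my_dma_ops = {
		.dma_host_set	= ide_dma_host_set,
		.dma_setup	= ide_dma_setup,
		.dma_exec_cmd	= ide_dma_exec_cmd,
		.dma_start	= ide_dma_start,
		.dma_end	= my_dma_end,		/* the custom hook */
		.dma_test_irq	= ide_dma_test_irq,
		.dma_timeout	= ide_dma_timeout,
		.dma_lost_irq	= ide_dma_lost_irq,
	};

	/* in the driver's port init: hwif->dma_ops = &my_dma_ops; */
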
void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
{
- u8 dma_stat;
-
- if (ide_dma_iobase(hwif, base))
- return;
-
- if (ide_allocate_dma_engine(hwif)) {
- ide_release_dma(hwif);
- return;
- }
-
hwif->dma_base = base;
if (!hwif->dma_command)
@@ -934,27 +867,7 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
if (!hwif->dma_prdtable)
hwif->dma_prdtable = hwif->dma_base + 4;
- if (!hwif->dma_host_set)
- hwif->dma_host_set = &ide_dma_host_set;
- if (!hwif->dma_setup)
- hwif->dma_setup = &ide_dma_setup;
- if (!hwif->dma_exec_cmd)
- hwif->dma_exec_cmd = &ide_dma_exec_cmd;
- if (!hwif->dma_start)
- hwif->dma_start = &ide_dma_start;
- if (!hwif->ide_dma_end)
- hwif->ide_dma_end = &__ide_dma_end;
- if (!hwif->ide_dma_test_irq)
- hwif->ide_dma_test_irq = &__ide_dma_test_irq;
- if (!hwif->dma_timeout)
- hwif->dma_timeout = &ide_dma_timeout;
- if (!hwif->dma_lost_irq)
- hwif->dma_lost_irq = &ide_dma_lost_irq;
-
- dma_stat = hwif->INB(hwif->dma_status);
- printk(KERN_CONT ", BIOS settings: %s:%s, %s:%s\n",
- hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "PIO",
- hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "PIO");
+ hwif->dma_ops = &sff_dma_ops;
}
EXPORT_SYMBOL_GPL(ide_setup_dma);
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 5f133dfb541..489079b8ed0 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -396,7 +396,7 @@ static void idefloppy_retry_pc(ide_drive_t *drive)
}
/* The usual interrupt handler called during a packet command. */
-static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
+static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
{
idefloppy_floppy_t *floppy = drive->driver_data;
ide_hwif_t *hwif = drive->hwif;
@@ -411,7 +411,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
debug_log("Reached %s interrupt handler\n", __func__);
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
- dma_error = hwif->ide_dma_end(drive);
+ dma_error = hwif->dma_ops->dma_end(drive);
if (dma_error) {
printk(KERN_ERR "%s: DMA %s error\n", drive->name,
rq_data_dir(rq) ? "write" : "read");
@@ -465,10 +465,10 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
}
/* Get the number of bytes to transfer */
- bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
- hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
+ bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
+ hwif->INB(hwif->io_ports.lbam_addr);
/* on this interrupt */
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if (ireason & CD) {
printk(KERN_ERR "ide-floppy: CoD != 0 in %s\n", __func__);
@@ -539,7 +539,7 @@ static ide_startstop_t idefloppy_transfer_pc(ide_drive_t *drive)
"initiated yet DRQ isn't asserted\n");
return startstop;
}
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if ((ireason & CD) == 0 || (ireason & IO)) {
printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while "
"issuing a packet command\n");
@@ -586,7 +586,7 @@ static ide_startstop_t idefloppy_transfer_pc1(ide_drive_t *drive)
"initiated yet DRQ isn't asserted\n");
return startstop;
}
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if ((ireason & CD) == 0 || (ireason & IO)) {
printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) "
"while issuing a packet command\n");
@@ -663,7 +663,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
dma = 0;
if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
- dma = !hwif->dma_setup(drive);
+ dma = !hwif->dma_ops->dma_setup(drive);
ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
IDE_TFLAG_OUT_DEVICE, bcount, dma);
@@ -671,7 +671,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
if (dma) {
/* Begin DMA, if necessary */
pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
- hwif->dma_start(drive);
+ hwif->dma_ops->dma_start(drive);
}
/* Can we transfer the packet when we get the interrupt or wait? */
@@ -692,7 +692,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
return ide_started;
} else {
/* Issue the packet command */
- hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
return (*pkt_xfer_routine) (drive);
}
}
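
The ireason tests in this file decode the two low bits of the ATAPI interrupt reason register: CoD (bit 0, command vs. data) and IO (bit 1, transfer direction). While the packet is being issued the drive must report CoD set and IO clear; anything else triggers the errors logged above. A hypothetical decoder over the CD/IO masks used here:

	static const char *atapi_phase(u8 ireason)
	{
		switch (ireason & (CD | IO)) {
		case CD:	return "awaiting command packet";
		case 0:		return "data, host to device";
		case IO:	return "data, device to host";
		case CD | IO:	return "status";
		}
		return "?";		/* not reachable */
	}
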
@@ -1596,13 +1596,13 @@ static int idefloppy_revalidate_disk(struct gendisk *disk)
}
static struct block_device_operations idefloppy_ops = {
- .owner = THIS_MODULE,
- .open = idefloppy_open,
- .release = idefloppy_release,
- .ioctl = idefloppy_ioctl,
- .getgeo = idefloppy_getgeo,
- .media_changed = idefloppy_media_changed,
- .revalidate_disk= idefloppy_revalidate_disk
+ .owner = THIS_MODULE,
+ .open = idefloppy_open,
+ .release = idefloppy_release,
+ .ioctl = idefloppy_ioctl,
+ .getgeo = idefloppy_getgeo,
+ .media_changed = idefloppy_media_changed,
+ .revalidate_disk = idefloppy_revalidate_disk
};
static int ide_floppy_probe(ide_drive_t *drive)
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 25fda0a3263..a6073e248f4 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -33,7 +33,7 @@ static ssize_t store_add(struct class *cls, const char *buf, size_t n)
if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
return -EINVAL;
- hwif = ide_find_port(base);
+ hwif = ide_find_port();
if (hwif == NULL)
return -ENOENT;
@@ -90,19 +90,45 @@ static int __init ide_generic_init(void)
int i;
for (i = 0; i < MAX_HWIFS; i++) {
- ide_hwif_t *hwif = &ide_hwifs[i];
+ ide_hwif_t *hwif;
unsigned long io_addr = ide_default_io_base(i);
hw_regs_t hw;
- if (hwif->chipset == ide_unknown && io_addr) {
+ idx[i] = 0xff;
+
+ if (io_addr) {
+ if (!request_region(io_addr, 8, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX "
+ "not free.\n",
+ DRV_NAME, io_addr, io_addr + 7);
+ continue;
+ }
+
+ if (!request_region(io_addr + 0x206, 1, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX "
+ "not free.\n",
+ DRV_NAME, io_addr + 0x206);
+ release_region(io_addr, 8);
+ continue;
+ }
+
+ /*
+ * Skip probing if the corresponding
+ * slot is already occupied.
+ */
+ hwif = ide_find_port();
+ if (hwif == NULL || hwif->index != i) {
+ idx[i] = 0xff;
+ continue;
+ }
+
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
hw.irq = ide_default_irq(io_addr);
ide_init_port_hw(hwif, &hw);
idx[i] = i;
- } else
- idx[i] = 0xff;
+ }
}
ide_device_add_all(idx, NULL);
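The two request_region() calls added to ide_generic_init() cover the split legacy decode. Worked out for the default bases:

        /*
         * ide0: task file 0x1f0..0x1f7 (8 ports), ctl at 0x1f0 + 0x206 = 0x3f6
         * ide1: task file 0x170..0x177 (8 ports), ctl at 0x170 + 0x206 = 0x376
         *
         * hence one 8-port region at io_addr plus a single-port region at
         * io_addr + 0x206.
         */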
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 31e5afadb7e..3a2d8930d17 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -218,7 +218,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
* we could be smarter and check for current xfer_speed
* in struct drive etc...
*/
- if (drive->hwif->dma_host_set == NULL)
+ if (drive->hwif->dma_ops == NULL)
break;
/*
* TODO: respect ->using_dma setting
@@ -298,48 +298,43 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
{
ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
struct ide_taskfile *tf = &task->tf;
if (task->tf_flags & IDE_TFLAG_IN_DATA) {
- u16 data = hwif->INW(hwif->io_ports[IDE_DATA_OFFSET]);
+ u16 data = hwif->INW(io_ports->data_addr);
tf->data = data & 0xff;
tf->hob_data = (data >> 8) & 0xff;
}
/* be sure we're looking at the low order bits */
- hwif->OUTB(drive->ctl & ~0x80, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTB(drive->ctl & ~0x80, io_ports->ctl_addr);
if (task->tf_flags & IDE_TFLAG_IN_NSECT)
- tf->nsect = hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
+ tf->nsect = hwif->INB(io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_IN_LBAL)
- tf->lbal = hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
+ tf->lbal = hwif->INB(io_ports->lbal_addr);
if (task->tf_flags & IDE_TFLAG_IN_LBAM)
- tf->lbam = hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
+ tf->lbam = hwif->INB(io_ports->lbam_addr);
if (task->tf_flags & IDE_TFLAG_IN_LBAH)
- tf->lbah = hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
+ tf->lbah = hwif->INB(io_ports->lbah_addr);
if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
- tf->device = hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]);
+ tf->device = hwif->INB(io_ports->device_addr);
if (task->tf_flags & IDE_TFLAG_LBA48) {
- hwif->OUTB(drive->ctl | 0x80,
- hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTB(drive->ctl | 0x80, io_ports->ctl_addr);
if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
- tf->hob_feature =
- hwif->INB(hwif->io_ports[IDE_FEATURE_OFFSET]);
+ tf->hob_feature = hwif->INB(io_ports->feature_addr);
if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
- tf->hob_nsect =
- hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
+ tf->hob_nsect = hwif->INB(io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
- tf->hob_lbal =
- hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
+ tf->hob_lbal = hwif->INB(io_ports->lbal_addr);
if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
- tf->hob_lbam =
- hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
+ tf->hob_lbam = hwif->INB(io_ports->lbam_addr);
if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
- tf->hob_lbah =
- hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
+ tf->hob_lbah = hwif->INB(io_ports->lbah_addr);
}
}
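The ide_tf_read() conversion above swaps array indexing for the named members of struct ide_io_ports. The correspondence, as used throughout these hunks:

        /*
         *      io_ports[IDE_DATA_OFFSET]       -> io_ports.data_addr
         *      io_ports[IDE_FEATURE_OFFSET]    -> io_ports.feature_addr
         *      io_ports[IDE_NSECTOR_OFFSET]    -> io_ports.nsect_addr
         *      io_ports[IDE_SECTOR_OFFSET]     -> io_ports.lbal_addr
         *      io_ports[IDE_LCYL_OFFSET]       -> io_ports.lbam_addr
         *      io_ports[IDE_HCYL_OFFSET]       -> io_ports.lbah_addr
         *      io_ports[IDE_SELECT_OFFSET]     -> io_ports.device_addr
         *      io_ports[IDE_STATUS_OFFSET]     -> io_ports.status_addr
         *      io_ports[IDE_COMMAND_OFFSET]    -> io_ports.command_addr
         *      io_ports[IDE_CONTROL_OFFSET]    -> io_ports.ctl_addr
         *
         * The ATAPI byte-count registers alias lbam/lbah and the interrupt
         * reason register aliases nsect_addr, which is why the ide-floppy
         * hunks read bcount from lbah_addr/lbam_addr and ireason from
         * nsect_addr.
         */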
@@ -454,7 +449,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
if (err == ABRT_ERR) {
if (drive->select.b.lba &&
/* some newer drives don't support WIN_SPECIFY */
- hwif->INB(hwif->io_ports[IDE_COMMAND_OFFSET]) ==
+ hwif->INB(hwif->io_ports.command_addr) ==
WIN_SPECIFY)
return ide_stopped;
} else if ((err & BAD_CRC) == BAD_CRC) {
@@ -507,8 +502,7 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
- hwif->OUTB(WIN_IDLEIMMEDIATE,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr);
if (rq->errors >= ERROR_MAX) {
ide_kill_rq(drive, rq);
@@ -721,15 +715,12 @@ static ide_startstop_t do_special (ide_drive_t *drive)
#endif
if (s->b.set_tune) {
ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
u8 req_pio = drive->tune_req;
s->b.set_tune = 0;
if (set_pio_mode_abuse(drive->hwif, req_pio)) {
-
- if (hwif->set_pio_mode == NULL)
- return ide_stopped;
-
/*
* take ide_lock for drive->[no_]unmask/[no_]io_32bit
*/
@@ -737,10 +728,10 @@ static ide_startstop_t do_special (ide_drive_t *drive)
unsigned long flags;
spin_lock_irqsave(&ide_lock, flags);
- hwif->set_pio_mode(drive, req_pio);
+ port_ops->set_pio_mode(drive, req_pio);
spin_unlock_irqrestore(&ide_lock, flags);
} else
- hwif->set_pio_mode(drive, req_pio);
+ port_ops->set_pio_mode(drive, req_pio);
} else {
int keep_dma = drive->using_dma;
@@ -1241,12 +1232,12 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
if (error < 0) {
printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
- (void)HWIF(drive)->ide_dma_end(drive);
+ (void)hwif->dma_ops->dma_end(drive);
ret = ide_error(drive, "dma timeout error",
ide_read_status(drive));
} else {
printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
- hwif->dma_timeout(drive);
+ hwif->dma_ops->dma_timeout(drive);
}
/*
@@ -1358,7 +1349,7 @@ void ide_timer_expiry (unsigned long data)
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
if (drive->waiting_for_dma)
- hwgroup->hwif->dma_lost_irq(drive);
+ hwif->dma_ops->dma_lost_irq(drive);
(void)ide_ack_intr(hwif);
printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
startstop = handler(drive);
@@ -1424,7 +1415,7 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
*/
do {
if (hwif->irq == irq) {
- stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = hwif->INB(hwif->io_ports.status_addr);
if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
/* Try to not flood the console with msgs */
static unsigned long last_msgtime, count;
@@ -1514,7 +1505,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
* Whack the status register, just in case
* we have a leftover pending IRQ.
*/
- (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ (void) hwif->INB(hwif->io_ports.status_addr);
#endif /* CONFIG_BLK_DEV_IDEPCI */
}
spin_unlock_irqrestore(&ide_lock, flags);
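Note that in ide_start_power_step() the old "hwif->dma_host_set == NULL" test becomes "hwif->dma_ops == NULL": the presence of the whole method table is now the "host supports DMA" predicate. As a sketch (hypothetical helper, not part of the patch):

        /* Hypothetical helper illustrating the new convention. */
        static inline int ide_host_has_dma(ide_hwif_t *hwif)
        {
                return hwif->dma_ops != NULL;   /* no method table: PIO-only host */
        }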
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 45944219eea..5425d3038ec 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -159,17 +159,20 @@ EXPORT_SYMBOL(default_hwif_mmiops);
void SELECT_DRIVE (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
- if (hwif->selectproc)
- hwif->selectproc(drive);
+ if (port_ops && port_ops->selectproc)
+ port_ops->selectproc(drive);
- hwif->OUTB(drive->select.all, hwif->io_ports[IDE_SELECT_OFFSET]);
+ hwif->OUTB(drive->select.all, hwif->io_ports.device_addr);
}
void SELECT_MASK (ide_drive_t *drive, int mask)
{
- if (HWIF(drive)->maskproc)
- HWIF(drive)->maskproc(drive, mask);
+ const struct ide_port_ops *port_ops = drive->hwif->port_ops;
+
+ if (port_ops && port_ops->maskproc)
+ port_ops->maskproc(drive, mask);
}
/*
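SELECT_DRIVE() and SELECT_MASK() now reach their optional hooks through hwif->port_ops, and both the table and the individual method may be NULL. The guarded-call pattern, in general form, as the two hunks above apply it:

        const struct ide_port_ops *port_ops = hwif->port_ops;

        if (port_ops && port_ops->maskproc)     /* table and hook are both optional */
                port_ops->maskproc(drive, mask);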
@@ -191,24 +194,22 @@ static void ata_vlb_sync(ide_drive_t *drive, unsigned long port)
*/
static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 io_32bit = drive->io_32bit;
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ u8 io_32bit = drive->io_32bit;
if (io_32bit) {
if (io_32bit & 2) {
unsigned long flags;
local_irq_save(flags);
- ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]);
- hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount);
+ ata_vlb_sync(drive, io_ports->nsect_addr);
+ hwif->INSL(io_ports->data_addr, buffer, wcount);
local_irq_restore(flags);
} else
- hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount);
+ hwif->INSL(io_ports->data_addr, buffer, wcount);
} else
- hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount << 1);
+ hwif->INSW(io_ports->data_addr, buffer, wcount << 1);
}
/*
@@ -216,24 +217,22 @@ static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
*/
static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 io_32bit = drive->io_32bit;
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ u8 io_32bit = drive->io_32bit;
if (io_32bit) {
if (io_32bit & 2) {
unsigned long flags;
local_irq_save(flags);
- ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]);
- hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount);
+ ata_vlb_sync(drive, io_ports->nsect_addr);
+ hwif->OUTSL(io_ports->data_addr, buffer, wcount);
local_irq_restore(flags);
} else
- hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount);
+ hwif->OUTSL(io_ports->data_addr, buffer, wcount);
} else
- hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- wcount << 1);
+ hwif->OUTSW(io_ports->data_addr, buffer, wcount << 1);
}
/*
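A note on the counts in ata_input_data()/ata_output_data() above:

        /*
         * wcount is in 32-bit units throughout, so the 16-bit fallback moves
         * (wcount << 1) words: the byte total is identical on every path.
         * Bit 1 of drive->io_32bit selects the VLB variant, which brackets
         * the string I/O with ata_vlb_sync() and disabled local IRQs.
         */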
@@ -252,14 +251,13 @@ static void atapi_input_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
if (MACH_IS_ATARI || MACH_IS_Q40) {
/* Atari has a byte-swapped IDE interface */
- insw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- bytecount / 2);
+ insw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
return;
}
#endif /* CONFIG_ATARI || CONFIG_Q40 */
hwif->ata_input_data(drive, buffer, bytecount / 4);
if ((bytecount & 0x03) >= 2)
- hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET],
+ hwif->INSW(hwif->io_ports.data_addr,
(u8 *)buffer + (bytecount & ~0x03), 1);
}
@@ -271,14 +269,13 @@ static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
if (MACH_IS_ATARI || MACH_IS_Q40) {
/* Atari has a byte-swapped IDE interface */
- outsw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer,
- bytecount / 2);
+ outsw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
return;
}
#endif /* CONFIG_ATARI || CONFIG_Q40 */
hwif->ata_output_data(drive, buffer, bytecount / 4);
if ((bytecount & 0x03) >= 2)
- hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET],
+ hwif->OUTSW(hwif->io_ports.data_addr,
(u8 *)buffer + (bytecount & ~0x03), 1);
}
@@ -429,7 +426,7 @@ int drive_is_ready (ide_drive_t *drive)
u8 stat = 0;
if (drive->waiting_for_dma)
- return hwif->ide_dma_test_irq(drive);
+ return hwif->dma_ops->dma_test_irq(drive);
#if 0
/* need to guarantee 400ns since last command was issued */
@@ -442,7 +439,7 @@ int drive_is_ready (ide_drive_t *drive)
* an interrupt with another pci card/device. We make no assumptions
* about possible isa-pnp and pci-pnp issues yet.
*/
- if (hwif->io_ports[IDE_CONTROL_OFFSET])
+ if (hwif->io_ports.ctl_addr)
stat = ide_read_altstatus(drive);
else
/* Note: this may clear a pending IRQ!! */
@@ -644,7 +641,7 @@ int ide_driveid_update(ide_drive_t *drive)
SELECT_MASK(drive, 1);
ide_set_irq(drive, 1);
msleep(50);
- hwif->OUTB(WIN_IDENTIFY, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_IDENTIFY, hwif->io_ports.command_addr);
timeout = jiffies + WAIT_WORSTCASE;
do {
if (time_after(jiffies, timeout)) {
@@ -693,6 +690,7 @@ int ide_driveid_update(ide_drive_t *drive)
int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
{
ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
int error = 0;
u8 stat;
@@ -700,8 +698,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
// msleep(50);
#ifdef CONFIG_BLK_DEV_IDEDMA
- if (hwif->dma_host_set) /* check if host supports DMA */
- hwif->dma_host_set(drive, 0);
+ if (hwif->dma_ops) /* check if host supports DMA */
+ hwif->dma_ops->dma_host_set(drive, 0);
#endif
/* Skip setting PIO flow-control modes on pre-EIDE drives */
@@ -731,10 +729,9 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
SELECT_MASK(drive, 0);
udelay(1);
ide_set_irq(drive, 0);
- hwif->OUTB(speed, hwif->io_ports[IDE_NSECTOR_OFFSET]);
- hwif->OUTB(SETFEATURES_XFER, hwif->io_ports[IDE_FEATURE_OFFSET]);
- hwif->OUTBSYNC(drive, WIN_SETFEATURES,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(speed, io_ports->nsect_addr);
+ hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr);
+ hwif->OUTBSYNC(drive, WIN_SETFEATURES, io_ports->command_addr);
if (drive->quirk_list == 2)
ide_set_irq(drive, 1);
@@ -759,8 +756,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
#ifdef CONFIG_BLK_DEV_IDEDMA
if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
drive->using_dma)
- hwif->dma_host_set(drive, 1);
- else if (hwif->dma_host_set) /* check if host supports DMA */
+ hwif->dma_ops->dma_host_set(drive, 1);
+ else if (hwif->dma_ops) /* check if host supports DMA */
ide_dma_off_quietly(drive);
#endif
@@ -842,7 +839,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
spin_lock_irqsave(&ide_lock, flags);
__ide_set_handler(drive, handler, timeout, expiry);
- hwif->OUTBSYNC(drive, cmd, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTBSYNC(drive, cmd, hwif->io_ports.command_addr);
/*
* Drive takes 400nS to respond, we must avoid the IRQ being
* serviced before that.
@@ -905,10 +902,11 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
{
ide_hwgroup_t *hwgroup = HWGROUP(drive);
ide_hwif_t *hwif = HWIF(drive);
+ const struct ide_port_ops *port_ops = hwif->port_ops;
u8 tmp;
- if (hwif->reset_poll != NULL) {
- if (hwif->reset_poll(drive)) {
+ if (port_ops && port_ops->reset_poll) {
+ if (port_ops->reset_poll(drive)) {
printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
hwif->name, drive->name);
return ide_stopped;
@@ -974,6 +972,8 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
static void pre_reset(ide_drive_t *drive)
{
+ const struct ide_port_ops *port_ops = drive->hwif->port_ops;
+
if (drive->media == ide_disk)
ide_disk_pre_reset(drive);
else
@@ -994,8 +994,8 @@ static void pre_reset(ide_drive_t *drive)
return;
}
- if (HWIF(drive)->pre_reset != NULL)
- HWIF(drive)->pre_reset(drive);
+ if (port_ops && port_ops->pre_reset)
+ port_ops->pre_reset(drive);
if (drive->current_speed != 0xff)
drive->desired_speed = drive->current_speed;
@@ -1023,12 +1023,16 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
unsigned long flags;
ide_hwif_t *hwif;
ide_hwgroup_t *hwgroup;
+ struct ide_io_ports *io_ports;
+ const struct ide_port_ops *port_ops;
u8 ctl;
spin_lock_irqsave(&ide_lock, flags);
hwif = HWIF(drive);
hwgroup = HWGROUP(drive);
+ io_ports = &hwif->io_ports;
+
/* We must not reset with running handlers */
BUG_ON(hwgroup->handler != NULL);
@@ -1038,8 +1042,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
pre_reset(drive);
SELECT_DRIVE(drive);
udelay (20);
- hwif->OUTBSYNC(drive, WIN_SRST,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTBSYNC(drive, WIN_SRST, io_ports->command_addr);
ndelay(400);
hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
hwgroup->polling = 1;
@@ -1055,7 +1058,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
for (unit = 0; unit < MAX_DRIVES; ++unit)
pre_reset(&hwif->drives[unit]);
- if (hwif->io_ports[IDE_CONTROL_OFFSET] == 0) {
+ if (io_ports->ctl_addr == 0) {
spin_unlock_irqrestore(&ide_lock, flags);
return ide_stopped;
}
@@ -1070,14 +1073,14 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
* recover from reset very quickly, saving us the first 50ms wait time.
*/
/* set SRST and nIEN */
- hwif->OUTBSYNC(drive, drive->ctl|6, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTBSYNC(drive, drive->ctl|6, io_ports->ctl_addr);
/* more than enough time */
udelay(10);
if (drive->quirk_list == 2)
ctl = drive->ctl; /* clear SRST and nIEN */
else
ctl = drive->ctl | 2; /* clear SRST, leave nIEN */
- hwif->OUTBSYNC(drive, ctl, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTBSYNC(drive, ctl, io_ports->ctl_addr);
/* more than enough time */
udelay(10);
hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
@@ -1089,8 +1092,9 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
* state when the disks are reset this way. At least, the Winbond
* 553 documentation says that
*/
- if (hwif->resetproc)
- hwif->resetproc(drive);
+ port_ops = hwif->port_ops;
+ if (port_ops && port_ops->resetproc)
+ port_ops->resetproc(drive);
spin_unlock_irqrestore(&ide_lock, flags);
return ide_started;
@@ -1121,7 +1125,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
* about locking issues (2.5 work ?).
*/
mdelay(1);
- stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = hwif->INB(hwif->io_ports.status_addr);
if ((stat & BUSY_STAT) == 0)
return 0;
/*
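The device-control writes in the do_reset1() hunks above, spelled out (ATA device control register: bit 1 = nIEN, bit 2 = SRST):

        /*
         *      drive->ctl | 6  assert SRST, mask the drive IRQ
         *      drive->ctl | 2  release SRST, keep the IRQ masked
         *      drive->ctl      release SRST and unmask (quirk_list == 2 case)
         */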
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 7031a8dcf69..6f04ea3e93a 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -85,7 +85,7 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
mode = XFER_PIO_4;
}
-// printk("%s: mode 0x%02x, speed 0x%02x\n", __FUNCTION__, mode, speed);
+/* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */
return min(speed, mode);
}
@@ -274,16 +274,6 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
if (overridden)
printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n",
drive->name);
-
- /*
- * Conservative "downgrade" for all pre-ATA2 drives
- */
- if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_DOWNGRADE) == 0 &&
- pio_mode && pio_mode < 4) {
- pio_mode--;
- printk(KERN_INFO "%s: applying conservative "
- "PIO \"downgrade\"\n", drive->name);
- }
}
if (pio_mode > max_mode)
@@ -298,9 +288,11 @@ EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
void ide_set_pio(ide_drive_t *drive, u8 req_pio)
{
ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
u8 host_pio, pio;
- if (hwif->set_pio_mode == NULL)
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
return;
BUG_ON(hwif->pio_mask == 0x00);
@@ -352,26 +344,30 @@ void ide_toggle_bounce(ide_drive_t *drive, int on)
int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
{
ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
+ return 0;
- if (hwif->set_pio_mode == NULL)
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL)
return -1;
/*
* TODO: temporary hack for some legacy host drivers that didn't
* set transfer mode on the device in ->set_pio_mode method...
*/
- if (hwif->set_dma_mode == NULL) {
- hwif->set_pio_mode(drive, mode - XFER_PIO_0);
+ if (port_ops->set_dma_mode == NULL) {
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
return 0;
}
if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
if (ide_config_drive_speed(drive, mode))
return -1;
- hwif->set_pio_mode(drive, mode - XFER_PIO_0);
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
return 0;
} else {
- hwif->set_pio_mode(drive, mode - XFER_PIO_0);
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
return ide_config_drive_speed(drive, mode);
}
}
@@ -379,17 +375,21 @@ int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
{
ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
+ return 0;
- if (hwif->set_dma_mode == NULL)
+ if (port_ops == NULL || port_ops->set_dma_mode == NULL)
return -1;
if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
if (ide_config_drive_speed(drive, mode))
return -1;
- hwif->set_dma_mode(drive, mode);
+ port_ops->set_dma_mode(drive, mode);
return 0;
} else {
- hwif->set_dma_mode(drive, mode);
+ port_ops->set_dma_mode(drive, mode);
return ide_config_drive_speed(drive, mode);
}
}
@@ -409,8 +409,10 @@ EXPORT_SYMBOL_GPL(ide_set_dma_mode);
int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
{
ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
- if (hwif->set_dma_mode == NULL)
+ if (port_ops == NULL || port_ops->set_dma_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
return -1;
rate = ide_rate_filter(drive, rate);
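After this change the mode-setting entry points distinguish "silently accepted" from "unsupported":

        /*
         * Return contract of ide_set_pio_mode()/ide_set_dma_mode() above:
         *
         *       0      mode programmed, or deliberately left alone because
         *              IDE_HFLAG_NO_SET_MODE is set (firmware-managed timings)
         *      -1      no port_ops / no set_*_mode method, or the device
         *              rejected the speed in ide_config_drive_speed()
         */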
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 34c2ad36ce5..6a8953f68e9 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -11,34 +11,52 @@
*
* You should have received a copy of the GNU General Public License
* (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/pnp.h>
#include <linux/ide.h>
+#define DRV_NAME "ide-pnp"
+
/* Add your devices here :)) */
static struct pnp_device_id idepnp_devices[] = {
- /* Generic ESDI/IDE/ATA compatible hard disk controller */
+ /* Generic ESDI/IDE/ATA compatible hard disk controller */
{.id = "PNP0600", .driver_data = 0},
{.id = ""}
};
-static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id)
+static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
hw_regs_t hw;
ide_hwif_t *hwif;
+ unsigned long base, ctl;
if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
return -1;
+ base = pnp_port_start(dev, 0);
+ ctl = pnp_port_start(dev, 1);
+
+ if (!request_region(base, 8, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+ DRV_NAME, base, base + 7);
+ return -EBUSY;
+ }
+
+ if (!request_region(ctl, 1, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+ DRV_NAME, ctl);
+ release_region(base, 8);
+ return -EBUSY;
+ }
+
memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, pnp_port_start(dev, 0),
- pnp_port_start(dev, 1));
+ ide_std_init_ports(&hw, base, ctl);
hw.irq = pnp_irq(dev, 0);
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif) {
u8 index = hwif->index;
u8 idx[4] = { index, 0xff, 0xff, 0xff };
@@ -47,24 +65,27 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
ide_init_port_hw(hwif, &hw);
printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
- pnp_set_drvdata(dev,hwif);
+ pnp_set_drvdata(dev, hwif);
ide_device_add(idx, NULL);
return 0;
}
+ release_region(ctl, 1);
+ release_region(base, 8);
+
return -1;
}
-static void idepnp_remove(struct pnp_dev * dev)
+static void idepnp_remove(struct pnp_dev *dev)
{
ide_hwif_t *hwif = pnp_get_drvdata(dev);
- if (hwif)
- ide_unregister(hwif->index);
- else
- printk(KERN_ERR "idepnp: Unable to remove device, please report.\n");
+ ide_unregister(hwif);
+
+ release_region(pnp_port_start(dev, 1), 1);
+ release_region(pnp_port_start(dev, 0), 8);
}
static struct pnp_driver idepnp_driver = {
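The probe/remove pair above is now symmetric about resource ownership:

        /*
         *      probe:  request_region(base, 8), then request_region(ctl, 1);
         *              on any later failure, release in reverse order
         *      remove: ide_unregister(hwif), then release ctl, then base
         */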
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 6a196c27b0a..862f02603f9 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -264,6 +264,7 @@ err_misc:
static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
{
ide_hwif_t *hwif = HWIF(drive);
+ struct ide_io_ports *io_ports = &hwif->io_ports;
int use_altstatus = 0, rc;
unsigned long timeout;
u8 s = 0, a = 0;
@@ -271,7 +272,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
/* take a deep breath */
msleep(50);
- if (hwif->io_ports[IDE_CONTROL_OFFSET]) {
+ if (io_ports->ctl_addr) {
a = ide_read_altstatus(drive);
s = ide_read_status(drive);
if ((a ^ s) & ~INDEX_STAT)
@@ -289,10 +290,10 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
*/
if ((cmd == WIN_PIDENTIFY))
/* disable dma & overlap */
- hwif->OUTB(0, hwif->io_ports[IDE_FEATURE_OFFSET]);
+ hwif->OUTB(0, io_ports->feature_addr);
/* ask drive for ID */
- hwif->OUTB(cmd, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(cmd, io_ports->command_addr);
timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
timeout += jiffies;
@@ -353,7 +354,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
* interrupts during the identify-phase that
* the irq handler isn't expecting.
*/
- if (hwif->io_ports[IDE_CONTROL_OFFSET]) {
+ if (hwif->io_ports.ctl_addr) {
if (!hwif->irq) {
autoprobe = 1;
cookie = probe_irq_on();
@@ -393,7 +394,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
do {
msleep(50);
- stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = hwif->INB(hwif->io_ports.status_addr);
if ((stat & BUSY_STAT) == 0)
return 0;
} while (time_before(jiffies, timeout));
@@ -425,6 +426,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
static int do_probe (ide_drive_t *drive, u8 cmd)
{
ide_hwif_t *hwif = HWIF(drive);
+ struct ide_io_ports *io_ports = &hwif->io_ports;
int rc;
u8 stat;
@@ -445,7 +447,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
msleep(50);
SELECT_DRIVE(drive);
msleep(50);
- if (hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]) != drive->select.all &&
+ if (hwif->INB(io_ports->device_addr) != drive->select.all &&
!drive->present) {
if (drive->select.b.unit != 0) {
/* exit with drive0 selected */
@@ -472,17 +474,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
if (stat == (BUSY_STAT | READY_STAT))
return 4;
- if ((rc == 1 && cmd == WIN_PIDENTIFY) &&
- ((drive->autotune == IDE_TUNE_DEFAULT) ||
- (drive->autotune == IDE_TUNE_AUTO))) {
+ if (rc == 1 && cmd == WIN_PIDENTIFY) {
printk(KERN_ERR "%s: no response (status = 0x%02x), "
"resetting drive\n", drive->name, stat);
msleep(50);
- hwif->OUTB(drive->select.all,
- hwif->io_ports[IDE_SELECT_OFFSET]);
+ hwif->OUTB(drive->select.all, io_ports->device_addr);
msleep(50);
- hwif->OUTB(WIN_SRST,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_SRST, io_ports->command_addr);
(void)ide_busy_sleep(hwif);
rc = try_to_identify(drive, cmd);
}
@@ -518,7 +516,7 @@ static void enable_nest (ide_drive_t *drive)
printk("%s: enabling %s -- ", hwif->name, drive->id->model);
SELECT_DRIVE(drive);
msleep(50);
- hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr);
if (ide_busy_sleep(hwif)) {
printk(KERN_CONT "failed (timeout)\n");
@@ -644,7 +642,7 @@ static int ide_register_port(ide_hwif_t *hwif)
ret = device_register(&hwif->gendev);
if (ret < 0) {
printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
- __FUNCTION__, ret);
+ __func__, ret);
goto out;
}
@@ -773,8 +771,7 @@ static int ide_probe_port(ide_hwif_t *hwif)
BUG_ON(hwif->present);
- if (hwif->noprobe ||
- (hwif->drives[0].noprobe && hwif->drives[1].noprobe))
+ if (hwif->drives[0].noprobe && hwif->drives[1].noprobe)
return -EACCES;
/*
@@ -801,14 +798,9 @@ static int ide_probe_port(ide_hwif_t *hwif)
if (drive->present)
rc = 0;
}
- if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) {
- printk(KERN_WARNING "%s: reset\n", hwif->name);
- hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
- udelay(10);
- hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
- (void)ide_busy_sleep(hwif);
- }
+
local_irq_restore(flags);
+
/*
* Use cached IRQ number. It might be (and is...) changed by probe
* code above
@@ -821,29 +813,25 @@ static int ide_probe_port(ide_hwif_t *hwif)
static void ide_port_tune_devices(ide_hwif_t *hwif)
{
+ const struct ide_port_ops *port_ops = hwif->port_ops;
int unit;
for (unit = 0; unit < MAX_DRIVES; unit++) {
ide_drive_t *drive = &hwif->drives[unit];
- if (drive->present && hwif->quirkproc)
- hwif->quirkproc(drive);
+ if (drive->present && port_ops && port_ops->quirkproc)
+ port_ops->quirkproc(drive);
}
for (unit = 0; unit < MAX_DRIVES; ++unit) {
ide_drive_t *drive = &hwif->drives[unit];
if (drive->present) {
- if (drive->autotune == IDE_TUNE_AUTO)
- ide_set_max_pio(drive);
-
- if (drive->autotune != IDE_TUNE_DEFAULT &&
- drive->autotune != IDE_TUNE_AUTO)
- continue;
+ ide_set_max_pio(drive);
drive->nice1 = 1;
- if (hwif->dma_host_set)
+ if (hwif->dma_ops)
ide_set_dma(drive);
}
}
@@ -994,6 +982,7 @@ static void ide_port_setup_devices(ide_hwif_t *hwif)
*/
static int init_irq (ide_hwif_t *hwif)
{
+ struct ide_io_ports *io_ports = &hwif->io_ports;
unsigned int index;
ide_hwgroup_t *hwgroup;
ide_hwif_t *match = NULL;
@@ -1077,9 +1066,9 @@ static int init_irq (ide_hwif_t *hwif)
if (IDE_CHIPSET_IS_PCI(hwif->chipset))
sa = IRQF_SHARED;
- if (hwif->io_ports[IDE_CONTROL_OFFSET])
+ if (io_ports->ctl_addr)
/* clear nIEN */
- hwif->OUTB(0x08, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTB(0x08, io_ports->ctl_addr);
if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
goto out_unlink;
@@ -1095,12 +1084,11 @@ static int init_irq (ide_hwif_t *hwif)
#if !defined(__mc68000__)
printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
- hwif->io_ports[IDE_DATA_OFFSET],
- hwif->io_ports[IDE_DATA_OFFSET]+7,
- hwif->io_ports[IDE_CONTROL_OFFSET], hwif->irq);
+ io_ports->data_addr, io_ports->status_addr,
+ io_ports->ctl_addr, hwif->irq);
#else
printk("%s at 0x%08lx on irq %d", hwif->name,
- hwif->io_ports[IDE_DATA_OFFSET], hwif->irq);
+ io_ports->data_addr, hwif->irq);
#endif /* __mc68000__ */
if (match)
printk(" (%sed with %s)",
@@ -1242,8 +1230,8 @@ static int hwif_init(ide_hwif_t *hwif)
int old_irq;
if (!hwif->irq) {
- if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET])))
- {
+ hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
+ if (!hwif->irq) {
printk("%s: DISABLED, NO IRQ\n", hwif->name);
return 0;
}
@@ -1272,7 +1260,8 @@ static int hwif_init(ide_hwif_t *hwif)
* It failed to initialise. Find the default IRQ for
* this port and try that.
*/
- if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]))) {
+ hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
+ if (!hwif->irq) {
printk("%s: Disabled unable to get IRQ %d.\n",
hwif->name, old_irq);
goto out;
@@ -1324,6 +1313,7 @@ static void hwif_register_devices(ide_hwif_t *hwif)
static void ide_port_init_devices(ide_hwif_t *hwif)
{
+ const struct ide_port_ops *port_ops = hwif->port_ops;
int i;
for (i = 0; i < MAX_DRIVES; i++) {
@@ -1335,12 +1325,10 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
drive->unmask = 1;
if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
drive->no_unmask = 1;
- if ((hwif->host_flags & IDE_HFLAG_NO_AUTOTUNE) == 0)
- drive->autotune = 1;
}
- if (hwif->port_init_devs)
- hwif->port_init_devs(hwif);
+ if (port_ops && port_ops->port_init_devs)
+ port_ops->port_init_devs(hwif);
}
static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
@@ -1355,9 +1343,6 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
if (d->init_iops)
d->init_iops(hwif);
- if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0)
- ide_hwif_setup_dma(hwif, d);
-
if ((!hwif->irq && (d->host_flags & IDE_HFLAG_LEGACY_IRQS)) ||
(d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
hwif->irq = port ? 15 : 14;
@@ -1365,16 +1350,36 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
hwif->host_flags = d->host_flags;
hwif->pio_mask = d->pio_mask;
- if ((d->host_flags & IDE_HFLAG_SERIALIZE) && hwif->mate)
- hwif->mate->serialized = hwif->serialized = 1;
+ /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
+ if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
+ hwif->port_ops = d->port_ops;
+
+ if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
+ ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) {
+ if (hwif->mate)
+ hwif->mate->serialized = hwif->serialized = 1;
+ }
hwif->swdma_mask = d->swdma_mask;
hwif->mwdma_mask = d->mwdma_mask;
hwif->ultra_mask = d->udma_mask;
- /* reset DMA masks only for SFF-style DMA controllers */
- if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0 && hwif->dma_base == 0)
- hwif->swdma_mask = hwif->mwdma_mask = hwif->ultra_mask = 0;
+ if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ int rc;
+
+ if (d->init_dma)
+ rc = d->init_dma(hwif, d);
+ else
+ rc = ide_hwif_setup_dma(hwif, d);
+
+ if (rc < 0) {
+ printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
+ hwif->swdma_mask = 0;
+ hwif->mwdma_mask = 0;
+ hwif->ultra_mask = 0;
+ } else if (d->dma_ops)
+ hwif->dma_ops = d->dma_ops;
+ }
if (d->host_flags & IDE_HFLAG_RQSIZE_256)
hwif->rqsize = 256;
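The ide_init_port() hunk above replaces the unconditional ide_hwif_setup_dma() call with a fallible chain; ide_hwif_setup_dma() is evidently expected to return a status now. In outline:

        /* Outline of the DMA bring-up added above (names from the hunk). */
        if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
                int rc = d->init_dma ? d->init_dma(hwif, d)        /* host hook */
                                     : ide_hwif_setup_dma(hwif, d); /* generic  */

                if (rc < 0)             /* no engine: force PIO-only operation */
                        hwif->swdma_mask = hwif->mwdma_mask = hwif->ultra_mask = 0;
                else if (d->dma_ops)    /* host-specific methods take precedence */
                        hwif->dma_ops = d->dma_ops;
        }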
@@ -1386,9 +1391,11 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
static void ide_port_cable_detect(ide_hwif_t *hwif)
{
- if (hwif->cable_detect && (hwif->ultra_mask & 0x78)) {
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (port_ops && port_ops->cable_detect && (hwif->ultra_mask & 0x78)) {
if (hwif->cbl != ATA_CBL_PATA40_SHORT)
- hwif->cbl = hwif->cable_detect(hwif);
+ hwif->cbl = port_ops->cable_detect(hwif);
}
}
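Why 0x78 in ide_port_cable_detect():

        /*
         * ultra_mask carries one bit per supported UDMA mode, so
         *
         *      0x78 = (1 << 3) | (1 << 4) | (1 << 5) | (1 << 6)
         *
         * and the cable check only runs when UDMA3 or faster is on offer;
         * the slower modes work over a 40-wire cable anyway.
         */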
@@ -1444,19 +1451,74 @@ static int ide_sysfs_register_port(ide_hwif_t *hwif)
return rc;
}
+/**
+ * ide_find_port_slot - find free ide_hwifs[] slot
+ * @d: IDE port info
+ *
+ * Return the new hwif. If we are out of free slots return NULL.
+ */
+
+ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
+{
+ ide_hwif_t *hwif;
+ int i;
+ u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
+
+ /*
+ * Claim an unassigned slot.
+ *
+ * Give preference to claiming other slots before claiming ide0/ide1,
+ * just in case there's another interface yet-to-be-scanned
+ * which uses ports 0x1f0/0x170 (the ide0/ide1 defaults).
+ *
+ * Unless there is a bootable card that does not use the standard
+ * ports 0x1f0/0x170 (the ide0/ide1 defaults).
+ */
+ if (bootable) {
+ i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
+
+ for (; i < MAX_HWIFS; i++) {
+ hwif = &ide_hwifs[i];
+ if (hwif->chipset == ide_unknown)
+ return hwif;
+ }
+ } else {
+ for (i = 2; i < MAX_HWIFS; i++) {
+ hwif = &ide_hwifs[i];
+ if (hwif->chipset == ide_unknown)
+ return hwif;
+ }
+ for (i = 0; i < 2 && i < MAX_HWIFS; i++) {
+ hwif = &ide_hwifs[i];
+ if (hwif->chipset == ide_unknown)
+ return hwif;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ide_find_port_slot);
+
int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
{
ide_hwif_t *hwif, *mate = NULL;
int i, rc = 0;
for (i = 0; i < MAX_HWIFS; i++) {
- if (d == NULL || idx[i] == 0xff) {
+ if (idx[i] == 0xff) {
mate = NULL;
continue;
}
hwif = &ide_hwifs[idx[i]];
+ ide_port_apply_params(hwif);
+
+ if (d == NULL) {
+ mate = NULL;
+ continue;
+ }
+
if (d->chipset != ide_etrax100 && (i & 1) && mate) {
hwif->mate = mate;
mate->mate = hwif;
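ide_find_port_slot() above centralizes hwif slot allocation, preferring slots beyond ide0/ide1 for non-bootable interfaces so the legacy 0x1f0/0x170 slots stay free for a later scan. The argument-less ide_find_port() used by the ide-generic and ide-pnp hunks earlier is presumably a thin wrapper; a sketch of the assumed relationship:

        /* Assumed wrapper, matching the no-argument calls seen above. */
        static inline ide_hwif_t *ide_find_port(void)
        {
                return ide_find_port_slot(NULL);
        }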
@@ -1475,25 +1537,15 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
hwif = &ide_hwifs[idx[i]];
- if ((hwif->chipset != ide_4drives || !hwif->mate ||
- !hwif->mate->present) && ide_hwif_request_regions(hwif)) {
- printk(KERN_ERR "%s: ports already in use, "
- "skipping probe\n", hwif->name);
- continue;
- }
-
- if (ide_probe_port(hwif) < 0) {
- ide_hwif_release_regions(hwif);
- continue;
- }
-
- hwif->present = 1;
+ if (ide_probe_port(hwif) == 0)
+ hwif->present = 1;
if (hwif->chipset != ide_4drives || !hwif->mate ||
!hwif->mate->present)
ide_register_port(hwif);
- ide_port_tune_devices(hwif);
+ if (hwif->present)
+ ide_port_tune_devices(hwif);
}
for (i = 0; i < MAX_HWIFS; i++) {
@@ -1502,9 +1554,6 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
hwif = &ide_hwifs[idx[i]];
- if (!hwif->present)
- continue;
-
if (hwif_init(hwif) == 0) {
printk(KERN_INFO "%s: failed to initialize IDE "
"interface\n", hwif->name);
@@ -1513,10 +1562,13 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
continue;
}
- ide_port_setup_devices(hwif);
+ if (hwif->present)
+ ide_port_setup_devices(hwif);
ide_acpi_init(hwif);
- ide_acpi_port_init_devices(hwif);
+
+ if (hwif->present)
+ ide_acpi_port_init_devices(hwif);
}
for (i = 0; i < MAX_HWIFS; i++) {
@@ -1525,11 +1577,11 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
hwif = &ide_hwifs[idx[i]];
- if (hwif->present) {
- if (hwif->chipset == ide_unknown)
- hwif->chipset = ide_generic;
+ if (hwif->chipset == ide_unknown)
+ hwif->chipset = ide_generic;
+
+ if (hwif->present)
hwif_register_devices(hwif);
- }
}
for (i = 0; i < MAX_HWIFS; i++) {
@@ -1538,11 +1590,11 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
hwif = &ide_hwifs[idx[i]];
- if (hwif->present) {
- ide_sysfs_register_port(hwif);
- ide_proc_register_port(hwif);
+ ide_sysfs_register_port(hwif);
+ ide_proc_register_port(hwif);
+
+ if (hwif->present)
ide_proc_port_register_devices(hwif);
- }
}
return rc;
@@ -1563,6 +1615,7 @@ EXPORT_SYMBOL_GPL(ide_device_add);
void ide_port_scan(ide_hwif_t *hwif)
{
+ ide_port_apply_params(hwif);
ide_port_cable_detect(hwif);
ide_port_init_devices(hwif);
@@ -1578,3 +1631,67 @@ void ide_port_scan(ide_hwif_t *hwif)
ide_proc_port_register_devices(hwif);
}
EXPORT_SYMBOL_GPL(ide_port_scan);
+
+static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
+ const struct ide_port_info *d,
+ unsigned long config)
+{
+ ide_hwif_t *hwif;
+ unsigned long base, ctl;
+ int irq;
+
+ if (port_no == 0) {
+ base = 0x1f0;
+ ctl = 0x3f6;
+ irq = 14;
+ } else {
+ base = 0x170;
+ ctl = 0x376;
+ irq = 15;
+ }
+
+ if (!request_region(base, 8, d->name)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+ d->name, base, base + 7);
+ return;
+ }
+
+ if (!request_region(ctl, 1, d->name)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+ d->name, ctl);
+ release_region(base, 8);
+ return;
+ }
+
+ ide_std_init_ports(hw, base, ctl);
+ hw->irq = irq;
+
+ hwif = ide_find_port_slot(d);
+ if (hwif) {
+ ide_init_port_hw(hwif, hw);
+ if (config)
+ hwif->config_data = config;
+ idx[port_no] = hwif->index;
+ }
+}
+
+int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
+{
+ u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw[2];
+
+ memset(&hw, 0, sizeof(hw));
+
+ if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
+ ide_legacy_init_one(idx, &hw[0], 0, d, config);
+ ide_legacy_init_one(idx, &hw[1], 1, d, config);
+
+ if (idx[0] == 0xff && idx[1] == 0xff &&
+ (d->host_flags & IDE_HFLAG_SINGLE))
+ return -ENOENT;
+
+ ide_device_add(idx, d);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_legacy_device_add);
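A sketch of how a legacy chipset driver might adopt the new helper; the port-info contents here are purely illustrative, not taken from this patch:

        static const struct ide_port_info example_port_info = {
                .name           = "example",            /* hypothetical driver */
                .host_flags     = IDE_HFLAG_NO_DMA,
                .pio_mask       = ATA_PIO4,
        };

        static int __init example_init(void)
        {
                /* claims 0x1f0/0x170 style ports, then probes both slots */
                return ide_legacy_device_add(&example_port_info, 0);
        }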
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index edd7f186dc4..7b2f3815a83 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -47,28 +47,28 @@ static int proc_ide_read_imodel
const char *name;
switch (hwif->chipset) {
- case ide_generic: name = "generic"; break;
- case ide_pci: name = "pci"; break;
- case ide_cmd640: name = "cmd640"; break;
- case ide_dtc2278: name = "dtc2278"; break;
- case ide_ali14xx: name = "ali14xx"; break;
- case ide_qd65xx: name = "qd65xx"; break;
- case ide_umc8672: name = "umc8672"; break;
- case ide_ht6560b: name = "ht6560b"; break;
- case ide_rz1000: name = "rz1000"; break;
- case ide_trm290: name = "trm290"; break;
- case ide_cmd646: name = "cmd646"; break;
- case ide_cy82c693: name = "cy82c693"; break;
- case ide_4drives: name = "4drives"; break;
- case ide_pmac: name = "mac-io"; break;
- case ide_au1xxx: name = "au1xxx"; break;
- case ide_palm3710: name = "palm3710"; break;
- case ide_etrax100: name = "etrax100"; break;
- case ide_acorn: name = "acorn"; break;
- default: name = "(unknown)"; break;
+ case ide_generic: name = "generic"; break;
+ case ide_pci: name = "pci"; break;
+ case ide_cmd640: name = "cmd640"; break;
+ case ide_dtc2278: name = "dtc2278"; break;
+ case ide_ali14xx: name = "ali14xx"; break;
+ case ide_qd65xx: name = "qd65xx"; break;
+ case ide_umc8672: name = "umc8672"; break;
+ case ide_ht6560b: name = "ht6560b"; break;
+ case ide_rz1000: name = "rz1000"; break;
+ case ide_trm290: name = "trm290"; break;
+ case ide_cmd646: name = "cmd646"; break;
+ case ide_cy82c693: name = "cy82c693"; break;
+ case ide_4drives: name = "4drives"; break;
+ case ide_pmac: name = "mac-io"; break;
+ case ide_au1xxx: name = "au1xxx"; break;
+ case ide_palm3710: name = "palm3710"; break;
+ case ide_etrax100: name = "etrax100"; break;
+ case ide_acorn: name = "acorn"; break;
+ default: name = "(unknown)"; break;
}
len = sprintf(page, "%s\n", name);
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
static int proc_ide_read_mate
@@ -81,7 +81,7 @@ static int proc_ide_read_mate
len = sprintf(page, "%s\n", hwif->mate->name);
else
len = sprintf(page, "(none)\n");
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
static int proc_ide_read_channel
@@ -93,7 +93,7 @@ static int proc_ide_read_channel
page[0] = hwif->channel ? '1' : '0';
page[1] = '\n';
len = 2;
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
static int proc_ide_read_identify
@@ -120,7 +120,7 @@ static int proc_ide_read_identify
len = out - page;
}
}
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
/**
@@ -197,7 +197,7 @@ EXPORT_SYMBOL(ide_add_setting);
* The caller must hold the setting semaphore.
*/
-static void __ide_remove_setting (ide_drive_t *drive, char *name)
+static void __ide_remove_setting(ide_drive_t *drive, char *name)
{
ide_settings_t **p, *setting;
@@ -205,7 +205,8 @@ static void __ide_remove_setting (ide_drive_t *drive, char *name)
while ((*p) && strcmp((*p)->name, name))
p = &((*p)->next);
- if ((setting = (*p)) == NULL)
+ setting = (*p);
+ if (setting == NULL)
return;
(*p) = setting->next;
@@ -223,7 +224,7 @@ static void __ide_remove_setting (ide_drive_t *drive, char *name)
* caller must hold ide_setting_mtx.
*/
-static void auto_remove_settings (ide_drive_t *drive)
+static void auto_remove_settings(ide_drive_t *drive)
{
ide_settings_t *setting;
repeat:
@@ -279,16 +280,16 @@ static int ide_read_setting(ide_drive_t *drive, ide_settings_t *setting)
if ((setting->rw & SETTING_READ)) {
spin_lock_irqsave(&ide_lock, flags);
- switch(setting->data_type) {
- case TYPE_BYTE:
- val = *((u8 *) setting->data);
- break;
- case TYPE_SHORT:
- val = *((u16 *) setting->data);
- break;
- case TYPE_INT:
- val = *((u32 *) setting->data);
- break;
+ switch (setting->data_type) {
+ case TYPE_BYTE:
+ val = *((u8 *) setting->data);
+ break;
+ case TYPE_SHORT:
+ val = *((u16 *) setting->data);
+ break;
+ case TYPE_INT:
+ val = *((u32 *) setting->data);
+ break;
}
spin_unlock_irqrestore(&ide_lock, flags);
}
@@ -326,15 +327,15 @@ static int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int va
if (ide_spin_wait_hwgroup(drive))
return -EBUSY;
switch (setting->data_type) {
- case TYPE_BYTE:
- *((u8 *) setting->data) = val;
- break;
- case TYPE_SHORT:
- *((u16 *) setting->data) = val;
- break;
- case TYPE_INT:
- *((u32 *) setting->data) = val;
- break;
+ case TYPE_BYTE:
+ *((u8 *) setting->data) = val;
+ break;
+ case TYPE_SHORT:
+ *((u16 *) setting->data) = val;
+ break;
+ case TYPE_INT:
+ *((u32 *) setting->data) = val;
+ break;
}
spin_unlock_irq(&ide_lock);
return 0;
@@ -390,7 +391,7 @@ void ide_add_generic_settings (ide_drive_t *drive)
static void proc_ide_settings_warn(void)
{
- static int warned = 0;
+ static int warned;
if (warned)
return;
@@ -413,11 +414,12 @@ static int proc_ide_read_settings
mutex_lock(&ide_setting_mtx);
out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n");
out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n");
- while(setting) {
+ while (setting) {
mul_factor = setting->mul_factor;
div_factor = setting->div_factor;
out += sprintf(out, "%-24s", setting->name);
- if ((rc = ide_read_setting(drive, setting)) >= 0)
+ rc = ide_read_setting(drive, setting);
+ if (rc >= 0)
out += sprintf(out, "%-16d", rc * mul_factor / div_factor);
else
out += sprintf(out, "%-16s", "write-only");
@@ -431,7 +433,7 @@ static int proc_ide_read_settings
}
len = out - page;
mutex_unlock(&ide_setting_mtx);
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
#define MAX_LEN 30
@@ -512,8 +514,7 @@ static int proc_ide_write_settings(struct file *file, const char __user *buffer,
mutex_lock(&ide_setting_mtx);
setting = ide_find_setting_by_name(drive, name);
- if (!setting)
- {
+ if (!setting) {
mutex_unlock(&ide_setting_mtx);
goto parse_error;
}
@@ -533,8 +534,8 @@ parse_error:
int proc_ide_read_capacity
(char *page, char **start, off_t off, int count, int *eof, void *data)
{
- int len = sprintf(page,"%llu\n", (long long)0x7fffffff);
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ int len = sprintf(page, "%llu\n", (long long)0x7fffffff);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
EXPORT_SYMBOL_GPL(proc_ide_read_capacity);
@@ -546,13 +547,13 @@ int proc_ide_read_geometry
char *out = page;
int len;
- out += sprintf(out,"physical %d/%d/%d\n",
+ out += sprintf(out, "physical %d/%d/%d\n",
drive->cyl, drive->head, drive->sect);
- out += sprintf(out,"logical %d/%d/%d\n",
+ out += sprintf(out, "logical %d/%d/%d\n",
drive->bios_cyl, drive->bios_head, drive->bios_sect);
len = out - page;
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
EXPORT_SYMBOL(proc_ide_read_geometry);
@@ -566,7 +567,7 @@ static int proc_ide_read_dmodel
len = sprintf(page, "%.40s\n",
(id && id->model[0]) ? (char *)id->model : "(none)");
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
static int proc_ide_read_driver
@@ -583,7 +584,7 @@ static int proc_ide_read_driver
dev->driver->name, ide_drv->version);
} else
len = sprintf(page, "ide-default version 0.9.newide\n");
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
@@ -598,14 +599,14 @@ static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
err = device_attach(dev);
if (err < 0)
printk(KERN_WARNING "IDE: %s: device_attach error: %d\n",
- __FUNCTION__, err);
+ __func__, err);
drive->driver_req[0] = 0;
if (dev->driver == NULL) {
err = device_attach(dev);
if (err < 0)
printk(KERN_WARNING
"IDE: %s: device_attach(2) error: %d\n",
- __FUNCTION__, err);
+ __func__, err);
}
if (dev->driver && !strcmp(dev->driver->name, driver))
ret = 0;
@@ -639,30 +640,26 @@ static int proc_ide_read_media
int len;
switch (drive->media) {
- case ide_disk: media = "disk\n";
- break;
- case ide_cdrom: media = "cdrom\n";
- break;
- case ide_tape: media = "tape\n";
- break;
- case ide_floppy:media = "floppy\n";
- break;
- case ide_optical:media = "optical\n";
- break;
- default: media = "UNKNOWN\n";
- break;
+ case ide_disk: media = "disk\n"; break;
+ case ide_cdrom: media = "cdrom\n"; break;
+ case ide_tape: media = "tape\n"; break;
+ case ide_floppy: media = "floppy\n"; break;
+ case ide_optical: media = "optical\n"; break;
+ default: media = "UNKNOWN\n"; break;
}
- strcpy(page,media);
+ strcpy(page, media);
len = strlen(media);
- PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}
static ide_proc_entry_t generic_drive_entries[] = {
- { "driver", S_IFREG|S_IRUGO, proc_ide_read_driver, proc_ide_write_driver },
- { "identify", S_IFREG|S_IRUSR, proc_ide_read_identify, NULL },
- { "media", S_IFREG|S_IRUGO, proc_ide_read_media, NULL },
- { "model", S_IFREG|S_IRUGO, proc_ide_read_dmodel, NULL },
- { "settings", S_IFREG|S_IRUSR|S_IWUSR,proc_ide_read_settings, proc_ide_write_settings },
+ { "driver", S_IFREG|S_IRUGO, proc_ide_read_driver,
+ proc_ide_write_driver },
+ { "identify", S_IFREG|S_IRUSR, proc_ide_read_identify, NULL },
+ { "media", S_IFREG|S_IRUGO, proc_ide_read_media, NULL },
+ { "model", S_IFREG|S_IRUGO, proc_ide_read_dmodel, NULL },
+ { "settings", S_IFREG|S_IRUSR|S_IWUSR, proc_ide_read_settings,
+ proc_ide_write_settings },
{ NULL, 0, NULL, NULL }
};
@@ -734,7 +731,6 @@ void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver)
spin_unlock_irqrestore(&ide_lock, flags);
mutex_unlock(&ide_setting_mtx);
}
-
EXPORT_SYMBOL(ide_proc_unregister_driver);
void ide_proc_port_register_devices(ide_hwif_t *hwif)
@@ -755,7 +751,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
drive->proc = proc_mkdir(drive->name, parent);
if (drive->proc)
ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
- sprintf(name,"ide%d/%s", (drive->name[2]-'a')/2, drive->name);
+ sprintf(name, "ide%d/%s", (drive->name[2]-'a')/2, drive->name);
ent = proc_symlink(drive->name, proc_ide_root, name);
if (!ent) return;
}
@@ -790,15 +786,6 @@ void ide_proc_register_port(ide_hwif_t *hwif)
}
}
-#ifdef CONFIG_BLK_DEV_IDEPCI
-void ide_pci_create_host_proc(const char *name, get_info_t *get_info)
-{
- create_proc_info_entry(name, 0, proc_ide_root, get_info);
-}
-
-EXPORT_SYMBOL_GPL(ide_pci_create_host_proc);
-#endif
-
void ide_proc_unregister_port(ide_hwif_t *hwif)
{
if (hwif->proc) {
@@ -825,7 +812,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
err = bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver);
if (err < 0)
printk(KERN_WARNING "IDE: %s: bus_for_each_drv error: %d\n",
- __FUNCTION__, err);
+ __func__, err);
return 0;
}
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index 98888da1b60..0e79efff1de 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -102,7 +102,7 @@ static int __init ide_scan_pcibus(void)
if (__pci_register_driver(d, d->driver.owner,
d->driver.mod_name))
printk(KERN_ERR "%s: failed to register %s driver\n",
- __FUNCTION__, d->driver.mod_name);
+ __func__, d->driver.mod_name);
}
return 0;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index f43fd070f1b..29870c41511 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -72,26 +72,6 @@ enum {
#endif
/**************************** Tunable parameters *****************************/
-
-
-/*
- * Pipelined mode parameters.
- *
- * We try to use the minimum number of stages which is enough to keep the tape
- * constantly streaming. To accomplish that, we implement a feedback loop around
- * the maximum number of stages:
- *
- * We start from MIN maximum stages (we will not even use MIN stages if we don't
- * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
- * pipeline is empty, until we reach the optimum value or until we reach MAX.
- *
- * Setting the following parameter to 0 is illegal: the pipelined mode cannot be
- * disabled (idetape_calculate_speeds() divides by tape->max_stages.)
- */
-#define IDETAPE_MIN_PIPELINE_STAGES 1
-#define IDETAPE_MAX_PIPELINE_STAGES 400
-#define IDETAPE_INCREASE_STAGES_RATE 20
-
/*
* After each failed packet command we issue a request sense command and retry
* the packet command IDETAPE_MAX_PC_RETRIES times.
@@ -224,28 +204,17 @@ enum {
/* 0 When the tape position is unknown */
IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
/* Device already opened */
- IDETAPE_FLAG_BUSY = (1 << 2),
- /* Error detected in a pipeline stage */
- IDETAPE_FLAG_PIPELINE_ERR = (1 << 3),
+ IDETAPE_FLAG_BUSY = (1 << 2),
/* Attempt to auto-detect the current user block size */
- IDETAPE_FLAG_DETECT_BS = (1 << 4),
+ IDETAPE_FLAG_DETECT_BS = (1 << 3),
/* Currently on a filemark */
- IDETAPE_FLAG_FILEMARK = (1 << 5),
+ IDETAPE_FLAG_FILEMARK = (1 << 4),
/* DRQ interrupt device */
- IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6),
- /* pipeline active */
- IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7),
+ IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5),
/* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8),
+ IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6),
};
-/* A pipeline stage. */
-typedef struct idetape_stage_s {
- struct request rq; /* The corresponding request */
- struct idetape_bh *bh; /* The data buffers */
- struct idetape_stage_s *next; /* Pointer to the next stage */
-} idetape_stage_t;
-
/*
* Most of our global data which we need to save even as we leave the driver due
* to an interrupt or a timer event is stored in the struct defined below.
@@ -289,9 +258,7 @@ typedef struct ide_tape_obj {
* While polling for DSC we use postponed_rq to postpone the current
* request so that ide.c will be able to service pending requests on the
* other device. Note that at most we will have only one DSC (usually
- * data transfer) request in the device request queue. Additional
- * requests can be queued in our internal pipeline, but they will be
- * visible to ide.c only one at a time.
+ * data transfer) request in the device request queue.
*/
struct request *postponed_rq;
/* The time in which we started polling for DSC */
@@ -331,43 +298,20 @@ typedef struct ide_tape_obj {
* At most, there is only one ide-tape originated data transfer request
* in the device request queue. This allows ide.c to easily service
* requests from the other device when we postpone our active request.
- * In the pipelined operation mode, we use our internal pipeline
- * structure to hold more data requests. The data buffer size is chosen
- * based on the tape's recommendation.
*/
- /* ptr to the request which is waiting in the device request queue */
- struct request *active_data_rq;
+
/* Data buffer size chosen based on the tape's recommendation */
- int stage_size;
- idetape_stage_t *merge_stage;
- int merge_stage_size;
+ int buffer_size;
+ /* merge buffer */
+ struct idetape_bh *merge_bh;
+ /* size of the merge buffer */
+ int merge_bh_size;
+ /* pointer to current buffer head within the merge buffer */
struct idetape_bh *bh;
char *b_data;
int b_count;
- /*
- * Pipeline parameters.
- *
- * To accomplish non-pipelined mode, we simply set the following
- * variables to zero (or NULL, where appropriate).
- */
- /* Number of currently used stages */
- int nr_stages;
- /* Number of pending stages */
- int nr_pending_stages;
- /* We will not allocate more than this number of stages */
- int max_stages, min_pipeline, max_pipeline;
- /* The first stage which will be removed from the pipeline */
- idetape_stage_t *first_stage;
- /* The currently active stage */
- idetape_stage_t *active_stage;
- /* Will be serviced after the currently active request */
- idetape_stage_t *next_stage;
- /* New requests will be added to the pipeline here */
- idetape_stage_t *last_stage;
- /* Optional free stage which we can use */
- idetape_stage_t *cache_stage;
- int pages_per_stage;
+ int pages_per_buffer;
/* Wasted space in each stage */
int excess_bh_size;
@@ -388,45 +332,6 @@ typedef struct ide_tape_obj {
/* the tape is write protected (hardware or opened as read-only) */
char write_prot;
- /*
- * Limit the number of times a request can be postponed, to avoid an
- * infinite postpone deadlock.
- */
- int postpone_cnt;
-
- /*
- * Measures number of frames:
- *
- * 1. written/read to/from the driver pipeline (pipeline_head).
- * 2. written/read to/from the tape buffers (idetape_bh).
- * 3. written/read by the tape to/from the media (tape_head).
- */
- int pipeline_head;
- int buffer_head;
- int tape_head;
- int last_tape_head;
-
- /* Speed control at the tape buffers input/output */
- unsigned long insert_time;
- int insert_size;
- int insert_speed;
- int max_insert_speed;
- int measure_insert_time;
-
- /* Speed regulation negative feedback loop */
- int speed_control;
- int pipeline_head_speed;
- int controlled_pipeline_head_speed;
- int uncontrolled_pipeline_head_speed;
- int controlled_last_pipeline_head;
- unsigned long uncontrolled_pipeline_head_time;
- unsigned long controlled_pipeline_head_time;
- int controlled_previous_pipeline_head;
- int uncontrolled_previous_pipeline_head;
- unsigned long controlled_previous_head_time;
- unsigned long uncontrolled_previous_head_time;
- int restart_speed_control_req;
-
u32 debug_mask;
} idetape_tape_t;
@@ -674,128 +579,36 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
}
}
-static void idetape_activate_next_stage(ide_drive_t *drive)
+/* Free data buffers completely. */
+static void ide_tape_kfree_buffer(idetape_tape_t *tape)
{
- idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *stage = tape->next_stage;
- struct request *rq = &stage->rq;
+ struct idetape_bh *prev_bh, *bh = tape->merge_bh;
- debug_log(DBG_PROCS, "Enter %s\n", __func__);
+ while (bh) {
+ u32 size = bh->b_size;
- if (stage == NULL) {
- printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
- " existing stage\n");
- return;
- }
+ while (size) {
+ /* mirror the allocation: free the largest power-of-two chunk first */
+ unsigned int order = fls(DIV_ROUND_UP(size, PAGE_SIZE)) - 1;
+ u32 chunk = min_t(u32, size, (1 << order) << PAGE_SHIFT);
- rq->rq_disk = tape->disk;
- rq->buffer = NULL;
- rq->special = (void *)stage->bh;
- tape->active_data_rq = rq;
- tape->active_stage = stage;
- tape->next_stage = stage->next;
-}
-
-/* Free a stage along with its related buffers completely. */
-static void __idetape_kfree_stage(idetape_stage_t *stage)
-{
- struct idetape_bh *prev_bh, *bh = stage->bh;
- int size;
-
- while (bh != NULL) {
- if (bh->b_data != NULL) {
- size = (int) bh->b_size;
- while (size > 0) {
- free_page((unsigned long) bh->b_data);
- size -= PAGE_SIZE;
- bh->b_data += PAGE_SIZE;
- }
+ if (bh->b_data)
+ free_pages((unsigned long)bh->b_data, order);
+
+ size -= chunk;
+ bh->b_data += chunk;
}
prev_bh = bh;
bh = bh->b_reqnext;
kfree(prev_bh);
}
- kfree(stage);
-}
-
-static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
-{
- __idetape_kfree_stage(stage);
}
-/*
- * Remove tape->first_stage from the pipeline. The caller should avoid race
- * conditions.
- */
-static void idetape_remove_stage_head(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *stage;
-
- debug_log(DBG_PROCS, "Enter %s\n", __func__);
-
- if (tape->first_stage == NULL) {
- printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
- return;
- }
- if (tape->active_stage == tape->first_stage) {
- printk(KERN_ERR "ide-tape: bug: Trying to free our active "
- "pipeline stage\n");
- return;
- }
- stage = tape->first_stage;
- tape->first_stage = stage->next;
- idetape_kfree_stage(tape, stage);
- tape->nr_stages--;
- if (tape->first_stage == NULL) {
- tape->last_stage = NULL;
- if (tape->next_stage != NULL)
- printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
- " NULL\n");
- if (tape->nr_stages)
- printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
- "now\n");
- }
-}
-
-/*
- * This will free all the pipeline stages starting from new_last_stage->next
- * to the end of the list, and point tape->last_stage to new_last_stage.
- */
-static void idetape_abort_pipeline(ide_drive_t *drive,
- idetape_stage_t *new_last_stage)
-{
- idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *stage = new_last_stage->next;
- idetape_stage_t *nstage;
-
- debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
-
- while (stage) {
- nstage = stage->next;
- idetape_kfree_stage(tape, stage);
- --tape->nr_stages;
- --tape->nr_pending_stages;
- stage = nstage;
- }
- if (new_last_stage)
- new_last_stage->next = NULL;
- tape->last_stage = new_last_stage;
- tape->next_stage = NULL;
-}
-
-/*
- * Finish servicing a request and insert a pending pipeline request into the
- * main device queue.
- */
static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
{
struct request *rq = HWGROUP(drive)->rq;
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
int error;
- int remove_stage = 0;
- idetape_stage_t *active_stage;
debug_log(DBG_PROCS, "Enter %s\n", __func__);
@@ -815,58 +628,8 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
spin_lock_irqsave(&tape->lock, flags);
- /* The request was a pipelined data transfer request */
- if (tape->active_data_rq == rq) {
- active_stage = tape->active_stage;
- tape->active_stage = NULL;
- tape->active_data_rq = NULL;
- tape->nr_pending_stages--;
- if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
- remove_stage = 1;
- if (error) {
- set_bit(IDETAPE_FLAG_PIPELINE_ERR,
- &tape->flags);
- if (error == IDETAPE_ERROR_EOD)
- idetape_abort_pipeline(drive,
- active_stage);
- }
- } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
- if (error == IDETAPE_ERROR_EOD) {
- set_bit(IDETAPE_FLAG_PIPELINE_ERR,
- &tape->flags);
- idetape_abort_pipeline(drive, active_stage);
- }
- }
- if (tape->next_stage != NULL) {
- idetape_activate_next_stage(drive);
-
- /* Insert the next request into the request queue. */
- (void)ide_do_drive_cmd(drive, tape->active_data_rq,
- ide_end);
- } else if (!error) {
- /*
- * This is a part of the feedback loop which tries to
- * find the optimum number of stages. We are starting
- * from a minimum maximum number of stages, and if we
- * sense that the pipeline is empty, we try to increase
- * it, until we reach the user compile time memory
- * limit.
- */
- int i = (tape->max_pipeline - tape->min_pipeline) / 10;
-
- tape->max_stages += max(i, 1);
- tape->max_stages = max(tape->max_stages,
- tape->min_pipeline);
- tape->max_stages = min(tape->max_stages,
- tape->max_pipeline);
- }
- }
ide_end_drive_cmd(drive, 0, 0);
- if (remove_stage)
- idetape_remove_stage_head(drive);
- if (tape->active_data_rq == NULL)
- clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
spin_unlock_irqrestore(&tape->lock, flags);
return 0;
}
@@ -993,7 +756,7 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
stat = ide_read_status(drive);
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
- if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
+ if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
/*
* A DMA error is sometimes expected. For example,
* if the tape is crossing a filemark during a
@@ -1083,10 +846,10 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
return ide_do_reset(drive);
}
/* Get the number of bytes to transfer on this interrupt. */
- bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
- hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
+ bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
+ hwif->INB(hwif->io_ports.lbam_addr);
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if (ireason & CD) {
printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
@@ -1190,12 +953,12 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
"yet DRQ isn't asserted\n");
return startstop;
}
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing "
"a packet command, retrying\n");
udelay(100);
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if (retries == 0) {
printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while "
"issuing a packet command, ignoring\n");
@@ -1213,7 +976,7 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
#ifdef CONFIG_BLK_DEV_IDEDMA
/* Begin DMA, if necessary */
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
- hwif->dma_start(drive);
+ hwif->dma_ops->dma_start(drive);
#endif
/* Send the actual packet */
HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
@@ -1279,7 +1042,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
ide_dma_off(drive);
}
if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
- dma_ok = !hwif->dma_setup(drive);
+ dma_ok = !hwif->dma_ops->dma_setup(drive);
ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
@@ -1292,7 +1055,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
IDETAPE_WAIT_CMD, NULL);
return ide_started;
} else {
- hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
return idetape_transfer_pc(drive);
}
}
@@ -1335,69 +1098,6 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
pc->idetape_callback = &idetape_pc_callback;
}
-static void idetape_calculate_speeds(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- if (time_after(jiffies,
- tape->controlled_pipeline_head_time + 120 * HZ)) {
- tape->controlled_previous_pipeline_head =
- tape->controlled_last_pipeline_head;
- tape->controlled_previous_head_time =
- tape->controlled_pipeline_head_time;
- tape->controlled_last_pipeline_head = tape->pipeline_head;
- tape->controlled_pipeline_head_time = jiffies;
- }
- if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
- tape->controlled_pipeline_head_speed = (tape->pipeline_head -
- tape->controlled_last_pipeline_head) * 32 * HZ /
- (jiffies - tape->controlled_pipeline_head_time);
- else if (time_after(jiffies, tape->controlled_previous_head_time))
- tape->controlled_pipeline_head_speed = (tape->pipeline_head -
- tape->controlled_previous_pipeline_head) * 32 *
- HZ / (jiffies - tape->controlled_previous_head_time);
-
- if (tape->nr_pending_stages < tape->max_stages/*- 1 */) {
- /* -1 for read mode error recovery */
- if (time_after(jiffies, tape->uncontrolled_previous_head_time +
- 10 * HZ)) {
- tape->uncontrolled_pipeline_head_time = jiffies;
- tape->uncontrolled_pipeline_head_speed =
- (tape->pipeline_head -
- tape->uncontrolled_previous_pipeline_head) *
- 32 * HZ / (jiffies -
- tape->uncontrolled_previous_head_time);
- }
- } else {
- tape->uncontrolled_previous_head_time = jiffies;
- tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
- if (time_after(jiffies, tape->uncontrolled_pipeline_head_time +
- 30 * HZ))
- tape->uncontrolled_pipeline_head_time = jiffies;
-
- }
- tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed,
- tape->controlled_pipeline_head_speed);
-
- if (tape->speed_control == 1) {
- if (tape->nr_pending_stages >= tape->max_stages / 2)
- tape->max_insert_speed = tape->pipeline_head_speed +
- (1100 - tape->pipeline_head_speed) * 2 *
- (tape->nr_pending_stages - tape->max_stages / 2)
- / tape->max_stages;
- else
- tape->max_insert_speed = 500 +
- (tape->pipeline_head_speed - 500) * 2 *
- tape->nr_pending_stages / tape->max_stages;
-
- if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
- tape->max_insert_speed = 5000;
- } else
- tape->max_insert_speed = tape->speed_control;
-
- tape->max_insert_speed = max(tape->max_insert_speed, 500);
-}
-
static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
@@ -1432,17 +1132,7 @@ static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
int blocks = tape->pc->xferred / tape->blk_size;
tape->avg_size += blocks * tape->blk_size;
- tape->insert_size += blocks * tape->blk_size;
- if (tape->insert_size > 1024 * 1024)
- tape->measure_insert_time = 1;
- if (tape->measure_insert_time) {
- tape->measure_insert_time = 0;
- tape->insert_time = jiffies;
- tape->insert_size = 0;
- }
- if (time_after(jiffies, tape->insert_time))
- tape->insert_speed = tape->insert_size / 1024 * HZ /
- (jiffies - tape->insert_time);
+
if (time_after_eq(jiffies, tape->avg_time + HZ)) {
tape->avg_speed = tape->avg_size * HZ /
(jiffies - tape->avg_time) / 1024;
@@ -1475,7 +1165,7 @@ static void idetape_create_read_cmd(idetape_tape_t *tape,
pc->buf = NULL;
pc->buf_size = length * tape->blk_size;
pc->req_xfer = pc->buf_size;
- if (pc->req_xfer == tape->stage_size)
+ if (pc->req_xfer == tape->buffer_size)
pc->flags |= PC_FLAG_DMA_RECOMMENDED;
}
@@ -1495,7 +1185,7 @@ static void idetape_create_write_cmd(idetape_tape_t *tape,
pc->buf = NULL;
pc->buf_size = length * tape->blk_size;
pc->req_xfer = pc->buf_size;
- if (pc->req_xfer == tape->stage_size)
+ if (pc->req_xfer == tape->buffer_size)
pc->flags |= PC_FLAG_DMA_RECOMMENDED;
}
@@ -1547,10 +1237,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
drive->post_reset = 0;
}
- if (time_after(jiffies, tape->insert_time))
- tape->insert_speed = tape->insert_size / 1024 * HZ /
- (jiffies - tape->insert_time);
- idetape_calculate_speeds(drive);
if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
(stat & SEEK_STAT) == 0) {
if (postponed_rq == NULL) {
@@ -1574,16 +1260,12 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
return ide_stopped;
}
if (rq->cmd[0] & REQ_IDETAPE_READ) {
- tape->buffer_head++;
- tape->postpone_cnt = 0;
pc = idetape_next_pc_storage(drive);
idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
(struct idetape_bh *)rq->special);
goto out;
}
if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
- tape->buffer_head++;
- tape->postpone_cnt = 0;
pc = idetape_next_pc_storage(drive);
idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
(struct idetape_bh *)rq->special);
@@ -1604,111 +1286,91 @@ out:
return idetape_issue_pc(drive, pc);
}
-/* Pipeline related functions */
-static inline int idetape_pipeline_active(idetape_tape_t *tape)
-{
- int rc1, rc2;
-
- rc1 = test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
- rc2 = (tape->active_data_rq != NULL);
- return rc1;
-}
-
/*
- * The function below uses __get_free_page to allocate a pipeline stage, along
- * with all the necessary small buffers which together make a buffer of size
- * tape->stage_size (or a bit more). We attempt to combine sequential pages as
+ * The function below uses __get_free_pages to allocate a data buffer of size
+ * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
* much as possible.
*
- * It returns a pointer to the new allocated stage, or NULL if we can't (or
- * don't want to) allocate a stage.
- *
- * Pipeline stages are optional and are used to increase performance. If we
- * can't allocate them, we'll manage without them.
+ * It returns a pointer to the newly allocated buffer, or NULL in case of
+ * failure.
*/
-static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
- int clear)
+static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
+ int full, int clear)
{
- idetape_stage_t *stage;
- struct idetape_bh *prev_bh, *bh;
- int pages = tape->pages_per_stage;
+ struct idetape_bh *prev_bh, *bh, *merge_bh;
+ int pages = tape->pages_per_buffer;
+ unsigned int order, b_allocd;
char *b_data = NULL;
- stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
- if (!stage)
- return NULL;
- stage->next = NULL;
-
- stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
- bh = stage->bh;
+ /* zeroed so that an early abort sees sane b_size/b_reqnext */
+ merge_bh = kzalloc(sizeof(struct idetape_bh), GFP_KERNEL);
+ bh = merge_bh;
if (bh == NULL)
goto abort;
- bh->b_reqnext = NULL;
- bh->b_data = (char *) __get_free_page(GFP_KERNEL);
+
+ order = fls(pages) - 1;
+ bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
if (!bh->b_data)
goto abort;
+ b_allocd = (1 << order) * PAGE_SIZE;
+ pages &= (1 << order) - 1;
+
if (clear)
- memset(bh->b_data, 0, PAGE_SIZE);
- bh->b_size = PAGE_SIZE;
+ memset(bh->b_data, 0, b_allocd);
+ bh->b_reqnext = NULL;
+ bh->b_size = b_allocd;
atomic_set(&bh->b_count, full ? bh->b_size : 0);
- while (--pages) {
- b_data = (char *) __get_free_page(GFP_KERNEL);
+ while (pages) {
+ order = fls(pages) - 1;
+ b_data = (char *) __get_free_pages(GFP_KERNEL, order);
if (!b_data)
goto abort;
+ b_allocd = (1 << order) * PAGE_SIZE;
+
if (clear)
- memset(b_data, 0, PAGE_SIZE);
- if (bh->b_data == b_data + PAGE_SIZE) {
- bh->b_size += PAGE_SIZE;
- bh->b_data -= PAGE_SIZE;
+ memset(b_data, 0, b_allocd);
+
+ /* newly allocated page frames sit just below the bh's data ... */
+ if (bh->b_data == b_data + b_allocd) {
+ bh->b_size += b_allocd;
+ bh->b_data -= b_allocd;
if (full)
- atomic_add(PAGE_SIZE, &bh->b_count);
+ atomic_add(b_allocd, &bh->b_count);
continue;
}
+ /* ... or just above it */
if (b_data == bh->b_data + bh->b_size) {
- bh->b_size += PAGE_SIZE;
+ bh->b_size += b_allocd;
if (full)
- atomic_add(PAGE_SIZE, &bh->b_count);
+ atomic_add(b_allocd, &bh->b_count);
continue;
}
prev_bh = bh;
bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
if (!bh) {
- free_page((unsigned long) b_data);
+ free_pages((unsigned long) b_data, order);
goto abort;
}
bh->b_reqnext = NULL;
bh->b_data = b_data;
- bh->b_size = PAGE_SIZE;
+ bh->b_size = b_allocd;
atomic_set(&bh->b_count, full ? bh->b_size : 0);
prev_bh->b_reqnext = bh;
+
+ pages &= (1 << order) - 1;
}
+
bh->b_size -= tape->excess_bh_size;
if (full)
atomic_sub(tape->excess_bh_size, &bh->b_count);
- return stage;
+ return merge_bh;
abort:
- __idetape_kfree_stage(stage);
+ /* free the partially built chain, not a stale tape->merge_bh */
+ tape->merge_bh = merge_bh;
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
return NULL;
}
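The loop above carves tape->pages_per_buffer into descending power-of-two chunks: fls() locates the highest set bit of the remaining page count, __get_free_pages() grabs a block of that order, and masking with (1 << order) - 1 keeps the low-order remainder for the next pass. A stand-alone sketch of the same decomposition, assuming 4 KiB pages and emulating the kernel's fls() with __builtin_clz():

#include <stdio.h>

/* emulates the kernel's fls(): 1-based index of the highest set bit */
static unsigned int fls_emul(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int pages = 13;	/* e.g. ceil(52 KiB / 4 KiB) */

	while (pages) {
		unsigned int order = fls_emul(pages) - 1;

		printf("order %u chunk = %u pages\n", order, 1u << order);
		pages &= (1u << order) - 1;	/* keep the remainder */
	}
	return 0;	/* prints orders 3, 2, 0 for the 13-page example */
}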
-static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape)
-{
- idetape_stage_t *cache_stage = tape->cache_stage;
-
- debug_log(DBG_PROCS, "Enter %s\n", __func__);
-
- if (tape->nr_stages >= tape->max_stages)
- return NULL;
- if (cache_stage != NULL) {
- tape->cache_stage = NULL;
- return cache_stage;
- }
- return __idetape_kmalloc_stage(tape, 0, 0);
-}
-
static int idetape_copy_stage_from_user(idetape_tape_t *tape,
- idetape_stage_t *stage, const char __user *buf, int n)
+ const char __user *buf, int n)
{
struct idetape_bh *bh = tape->bh;
int count;
@@ -1740,7 +1402,7 @@ static int idetape_copy_stage_from_user(idetape_tape_t *tape,
}
static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
- idetape_stage_t *stage, int n)
+ int n)
{
struct idetape_bh *bh = tape->bh;
int count;
@@ -1771,11 +1433,11 @@ static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
return ret;
}
-static void idetape_init_merge_stage(idetape_tape_t *tape)
+static void idetape_init_merge_buffer(idetape_tape_t *tape)
{
- struct idetape_bh *bh = tape->merge_stage->bh;
+ struct idetape_bh *bh = tape->merge_bh;
+ tape->bh = tape->merge_bh;
- tape->bh = bh;
if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
atomic_set(&bh->b_count, 0);
else {
@@ -1784,61 +1446,6 @@ static void idetape_init_merge_stage(idetape_tape_t *tape)
}
}
-static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage)
-{
- struct idetape_bh *tmp;
-
- tmp = stage->bh;
- stage->bh = tape->merge_stage->bh;
- tape->merge_stage->bh = tmp;
- idetape_init_merge_stage(tape);
-}
-
-/* Add a new stage at the end of the pipeline. */
-static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage)
-{
- idetape_tape_t *tape = drive->driver_data;
- unsigned long flags;
-
- debug_log(DBG_PROCS, "Enter %s\n", __func__);
-
- spin_lock_irqsave(&tape->lock, flags);
- stage->next = NULL;
- if (tape->last_stage != NULL)
- tape->last_stage->next = stage;
- else
- tape->first_stage = stage;
- tape->next_stage = stage;
- tape->last_stage = stage;
- if (tape->next_stage == NULL)
- tape->next_stage = tape->last_stage;
- tape->nr_stages++;
- tape->nr_pending_stages++;
- spin_unlock_irqrestore(&tape->lock, flags);
-}
-
-/* Install a completion in a pending request and sleep until it is serviced. The
- * caller should ensure that the request will not be serviced before we install
- * the completion (usually by disabling interrupts).
- */
-static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
- idetape_tape_t *tape = drive->driver_data;
-
- if (rq == NULL || !blk_special_request(rq)) {
- printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
- " request\n");
- return;
- }
- rq->end_io_data = &wait;
- rq->end_io = blk_end_sync_rq;
- spin_unlock_irq(&tape->lock);
- wait_for_completion(&wait);
- /* The stage and its struct request have been deallocated */
- spin_lock_irq(&tape->lock);
-}
-
static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
@@ -1907,7 +1514,7 @@ static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
* to the request list without waiting for it to be serviced! In that case, we
* usually use idetape_queue_pc_head().
*/
-static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
+static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
{
struct ide_tape_obj *tape = drive->driver_data;
struct request rq;
@@ -1939,7 +1546,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
timeout += jiffies;
while (time_before(jiffies, timeout)) {
idetape_create_test_unit_ready_cmd(&pc);
- if (!__idetape_queue_pc_tail(drive, &pc))
+ if (!idetape_queue_pc_tail(drive, &pc))
return 0;
if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
|| (tape->asc == 0x3A)) {
@@ -1948,7 +1555,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
return -ENOMEDIUM;
idetape_create_load_unload_cmd(drive, &pc,
IDETAPE_LU_LOAD_MASK);
- __idetape_queue_pc_tail(drive, &pc);
+ idetape_queue_pc_tail(drive, &pc);
load_attempted = 1;
/* not about to be ready */
} else if (!(tape->sense_key == 2 && tape->asc == 4 &&
@@ -1959,11 +1566,6 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
return -EIO;
}
-static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
-{
- return __idetape_queue_pc_tail(drive, pc);
-}
-
static int idetape_flush_tape_buffers(ide_drive_t *drive)
{
struct ide_atapi_pc pc;
@@ -2029,50 +1631,21 @@ static int idetape_create_prevent_cmd(ide_drive_t *drive,
return 1;
}
-static int __idetape_discard_read_pipeline(ide_drive_t *drive)
+static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
- unsigned long flags;
- int cnt;
if (tape->chrdev_dir != IDETAPE_DIR_READ)
- return 0;
+ return;
- /* Remove merge stage. */
- cnt = tape->merge_stage_size / tape->blk_size;
- if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
- ++cnt; /* Filemarks count as 1 sector */
- tape->merge_stage_size = 0;
- if (tape->merge_stage != NULL) {
- __idetape_kfree_stage(tape->merge_stage);
- tape->merge_stage = NULL;
+ clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
+ tape->merge_bh_size = 0;
+ if (tape->merge_bh != NULL) {
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
}
- /* Clear pipeline flags. */
- clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
tape->chrdev_dir = IDETAPE_DIR_NONE;
-
- /* Remove pipeline stages. */
- if (tape->first_stage == NULL)
- return 0;
-
- spin_lock_irqsave(&tape->lock, flags);
- tape->next_stage = NULL;
- if (idetape_pipeline_active(tape))
- idetape_wait_for_request(drive, tape->active_data_rq);
- spin_unlock_irqrestore(&tape->lock, flags);
-
- while (tape->first_stage != NULL) {
- struct request *rq_ptr = &tape->first_stage->rq;
-
- cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
- if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
- ++cnt;
- idetape_remove_stage_head(drive);
- }
- tape->nr_pending_stages = 0;
- tape->max_stages = tape->min_pipeline;
- return cnt;
}
/*
@@ -2089,7 +1662,7 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
struct ide_atapi_pc pc;
if (tape->chrdev_dir == IDETAPE_DIR_READ)
- __idetape_discard_read_pipeline(drive);
+ __ide_tape_discard_merge_buffer(drive);
idetape_wait_ready(drive, 60 * 5 * HZ);
idetape_create_locate_cmd(drive, &pc, block, partition, skip);
retval = idetape_queue_pc_tail(drive, &pc);
@@ -2100,20 +1673,19 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
return (idetape_queue_pc_tail(drive, &pc));
}
-static void idetape_discard_read_pipeline(ide_drive_t *drive,
+static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
int restore_position)
{
idetape_tape_t *tape = drive->driver_data;
- int cnt;
int seek, position;
- cnt = __idetape_discard_read_pipeline(drive);
+ __ide_tape_discard_merge_buffer(drive);
if (restore_position) {
position = idetape_read_position(drive);
- seek = position > cnt ? position - cnt : 0;
+ seek = position > 0 ? position : 0;
if (idetape_position_tape(drive, seek, 0, 0)) {
printk(KERN_INFO "ide-tape: %s: position_tape failed in"
- " discard_pipeline()\n", tape->name);
+ " %s\n", tape->name, __func__);
return;
}
}
@@ -2131,12 +1703,6 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
- if (idetape_pipeline_active(tape)) {
- printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
- __func__);
- return (0);
- }
-
idetape_init_rq(&rq, cmd);
rq.rq_disk = tape->disk;
rq.special = (void *)bh;
@@ -2148,27 +1714,13 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
return 0;
- if (tape->merge_stage)
- idetape_init_merge_stage(tape);
+ if (tape->merge_bh)
+ idetape_init_merge_buffer(tape);
if (rq.errors == IDETAPE_ERROR_GENERAL)
return -EIO;
return (tape->blk_size * (blocks-rq.current_nr_sectors));
}
-/* start servicing the pipeline stages, starting from tape->next_stage. */
-static void idetape_plug_pipeline(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
-
- if (tape->next_stage == NULL)
- return;
- if (!idetape_pipeline_active(tape)) {
- set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
- idetape_activate_next_stage(drive);
- (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
- }
-}
-
static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
{
idetape_init_pc(pc);
@@ -2206,135 +1758,39 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
pc->idetape_callback = &idetape_pc_callback;
}
-static void idetape_wait_first_stage(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- unsigned long flags;
-
- if (tape->first_stage == NULL)
- return;
- spin_lock_irqsave(&tape->lock, flags);
- if (tape->active_stage == tape->first_stage)
- idetape_wait_for_request(drive, tape->active_data_rq);
- spin_unlock_irqrestore(&tape->lock, flags);
-}
-
-/*
- * Try to add a character device originated write request to our pipeline. In
- * case we don't succeed, we revert to non-pipelined operation mode for this
- * request. In order to accomplish that, we
- *
- * 1. Try to allocate a new pipeline stage.
- * 2. If we can't, wait for more and more requests to be serviced and try again
- * each time.
- * 3. If we still can't allocate a stage, fallback to non-pipelined operation
- * mode for this request.
- */
+/* Queue up a character-device-originated write request. */
static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
{
idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *new_stage;
- unsigned long flags;
- struct request *rq;
debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
- /* Attempt to allocate a new stage. Beware possible race conditions. */
- while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
- spin_lock_irqsave(&tape->lock, flags);
- if (idetape_pipeline_active(tape)) {
- idetape_wait_for_request(drive, tape->active_data_rq);
- spin_unlock_irqrestore(&tape->lock, flags);
- } else {
- spin_unlock_irqrestore(&tape->lock, flags);
- idetape_plug_pipeline(drive);
- if (idetape_pipeline_active(tape))
- continue;
- /*
- * The machine is short on memory. Fallback to non-
- * pipelined operation mode for this request.
- */
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
- blocks, tape->merge_stage->bh);
- }
- }
- rq = &new_stage->rq;
- idetape_init_rq(rq, REQ_IDETAPE_WRITE);
- /* Doesn't actually matter - We always assume sequential access */
- rq->sector = tape->first_frame;
- rq->current_nr_sectors = blocks;
- rq->nr_sectors = blocks;
-
- idetape_switch_buffers(tape, new_stage);
- idetape_add_stage_tail(drive, new_stage);
- tape->pipeline_head++;
- idetape_calculate_speeds(drive);
-
- /*
- * Estimate whether the tape has stopped writing by checking if our
- * write pipeline is currently empty. If we are not writing anymore,
- * wait for the pipeline to be almost completely full (90%) before
- * starting to service requests, so that we will be able to keep up with
- * the higher speeds of the tape.
- */
- if (!idetape_pipeline_active(tape)) {
- if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
- tape->nr_stages >= tape->max_stages -
- tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
- tape->blk_size) {
- tape->measure_insert_time = 1;
- tape->insert_time = jiffies;
- tape->insert_size = 0;
- tape->insert_speed = 0;
- idetape_plug_pipeline(drive);
- }
- }
- if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
- /* Return a deferred error */
- return -EIO;
- return blocks;
-}
-
-/*
- * Wait until all pending pipeline requests are serviced. Typically called on
- * device close.
- */
-static void idetape_wait_for_pipeline(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- unsigned long flags;
-
- while (tape->next_stage || idetape_pipeline_active(tape)) {
- idetape_plug_pipeline(drive);
- spin_lock_irqsave(&tape->lock, flags);
- if (idetape_pipeline_active(tape))
- idetape_wait_for_request(drive, tape->active_data_rq);
- spin_unlock_irqrestore(&tape->lock, flags);
- }
+ return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
+ blocks, tape->merge_bh);
}
-static void idetape_empty_write_pipeline(ide_drive_t *drive)
+static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
int blocks, min;
struct idetape_bh *bh;
if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
- printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
+ printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
" but we are not writing.\n");
return;
}
- if (tape->merge_stage_size > tape->stage_size) {
+ if (tape->merge_bh_size > tape->buffer_size) {
printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
- tape->merge_stage_size = tape->stage_size;
+ tape->merge_bh_size = tape->buffer_size;
}
- if (tape->merge_stage_size) {
- blocks = tape->merge_stage_size / tape->blk_size;
- if (tape->merge_stage_size % tape->blk_size) {
+ if (tape->merge_bh_size) {
+ blocks = tape->merge_bh_size / tape->blk_size;
+ if (tape->merge_bh_size % tape->blk_size) {
unsigned int i;
blocks++;
- i = tape->blk_size - tape->merge_stage_size %
+ i = tape->blk_size - tape->merge_bh_size %
tape->blk_size;
bh = tape->bh->b_reqnext;
while (bh) {
@@ -2358,74 +1814,33 @@ static void idetape_empty_write_pipeline(ide_drive_t *drive)
}
}
(void) idetape_add_chrdev_write_request(drive, blocks);
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
}
- idetape_wait_for_pipeline(drive);
- if (tape->merge_stage != NULL) {
- __idetape_kfree_stage(tape->merge_stage);
- tape->merge_stage = NULL;
+ if (tape->merge_bh != NULL) {
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
}
- clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
tape->chrdev_dir = IDETAPE_DIR_NONE;
-
- /*
- * On the next backup, perform the feedback loop again. (I don't want to
- * keep sense information between backups, as some systems are
- * constantly on, and the system load can be totally different on the
- * next backup).
- */
- tape->max_stages = tape->min_pipeline;
- if (tape->first_stage != NULL ||
- tape->next_stage != NULL ||
- tape->last_stage != NULL ||
- tape->nr_stages != 0) {
- printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
- "first_stage %p, next_stage %p, "
- "last_stage %p, nr_stages %d\n",
- tape->first_stage, tape->next_stage,
- tape->last_stage, tape->nr_stages);
- }
}
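ide_tape_flush_merge_buffer() rounds a partial final block up to blk_size and zero-fills the gap before queueing, so the drive only ever sees whole blocks. The arithmetic reduces to the pattern below (a sketch over a flat buffer; the driver walks its bh chain instead, and pad_to_block is an illustrative name, not driver API):

#include <stddef.h>
#include <string.h>

/*
 * Zero-fill buf from len up to the next multiple of blk and return the
 * padded length; cap guards against running past the staging buffer.
 */
static size_t pad_to_block(char *buf, size_t len, size_t cap, size_t blk)
{
	size_t rem = len % blk;

	if (rem) {
		size_t pad = blk - rem;

		if (len + pad > cap)
			return len;	/* no room to pad: caller bug */
		memset(buf + len, 0, pad);
		len += pad;
	}
	return len;
}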
-static void idetape_restart_speed_control(ide_drive_t *drive)
+static int idetape_init_read(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-
- tape->restart_speed_control_req = 0;
- tape->pipeline_head = 0;
- tape->controlled_last_pipeline_head = 0;
- tape->controlled_previous_pipeline_head = 0;
- tape->uncontrolled_previous_pipeline_head = 0;
- tape->controlled_pipeline_head_speed = 5000;
- tape->pipeline_head_speed = 5000;
- tape->uncontrolled_pipeline_head_speed = 0;
- tape->controlled_pipeline_head_time =
- tape->uncontrolled_pipeline_head_time = jiffies;
- tape->controlled_previous_head_time =
- tape->uncontrolled_previous_head_time = jiffies;
-}
-
-static int idetape_init_read(ide_drive_t *drive, int max_stages)
-{
- idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *new_stage;
- struct request rq;
int bytes_read;
- u16 blocks = *(u16 *)&tape->caps[12];
/* Initialize read operation */
if (tape->chrdev_dir != IDETAPE_DIR_READ) {
if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
- idetape_empty_write_pipeline(drive);
+ ide_tape_flush_merge_buffer(drive);
idetape_flush_tape_buffers(drive);
}
- if (tape->merge_stage || tape->merge_stage_size) {
- printk(KERN_ERR "ide-tape: merge_stage_size should be"
+ if (tape->merge_bh || tape->merge_bh_size) {
+ printk(KERN_ERR "ide-tape: merge_bh_size should be"
" 0 now\n");
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
}
- tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
- if (!tape->merge_stage)
+ tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
+ if (!tape->merge_bh)
return -ENOMEM;
tape->chrdev_dir = IDETAPE_DIR_READ;
@@ -2438,54 +1853,23 @@ static int idetape_init_read(ide_drive_t *drive, int max_stages)
if (drive->dsc_overlap) {
bytes_read = idetape_queue_rw_tail(drive,
REQ_IDETAPE_READ, 0,
- tape->merge_stage->bh);
+ tape->merge_bh);
if (bytes_read < 0) {
- __idetape_kfree_stage(tape->merge_stage);
- tape->merge_stage = NULL;
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
tape->chrdev_dir = IDETAPE_DIR_NONE;
return bytes_read;
}
}
}
- if (tape->restart_speed_control_req)
- idetape_restart_speed_control(drive);
- idetape_init_rq(&rq, REQ_IDETAPE_READ);
- rq.sector = tape->first_frame;
- rq.nr_sectors = blocks;
- rq.current_nr_sectors = blocks;
- if (!test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags) &&
- tape->nr_stages < max_stages) {
- new_stage = idetape_kmalloc_stage(tape);
- while (new_stage != NULL) {
- new_stage->rq = rq;
- idetape_add_stage_tail(drive, new_stage);
- if (tape->nr_stages >= max_stages)
- break;
- new_stage = idetape_kmalloc_stage(tape);
- }
- }
- if (!idetape_pipeline_active(tape)) {
- if (tape->nr_pending_stages >= 3 * max_stages / 4) {
- tape->measure_insert_time = 1;
- tape->insert_time = jiffies;
- tape->insert_size = 0;
- tape->insert_speed = 0;
- idetape_plug_pipeline(drive);
- }
- }
+
return 0;
}
-/*
- * Called from idetape_chrdev_read() to service a character device read request
- * and add read-ahead requests to our pipeline.
- */
+/* Called from idetape_chrdev_read() to service a chrdev read request. */
static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
{
idetape_tape_t *tape = drive->driver_data;
- unsigned long flags;
- struct request *rq_ptr;
- int bytes_read;
debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
@@ -2493,39 +1877,10 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
return 0;
- /* Wait for the next block to reach the head of the pipeline. */
- idetape_init_read(drive, tape->max_stages);
- if (tape->first_stage == NULL) {
- if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
- return 0;
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
- tape->merge_stage->bh);
- }
- idetape_wait_first_stage(drive);
- rq_ptr = &tape->first_stage->rq;
- bytes_read = tape->blk_size * (rq_ptr->nr_sectors -
- rq_ptr->current_nr_sectors);
- rq_ptr->nr_sectors = 0;
- rq_ptr->current_nr_sectors = 0;
+ idetape_init_read(drive);
- if (rq_ptr->errors == IDETAPE_ERROR_EOD)
- return 0;
- else {
- idetape_switch_buffers(tape, tape->first_stage);
- if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
- set_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
- spin_lock_irqsave(&tape->lock, flags);
- idetape_remove_stage_head(drive);
- spin_unlock_irqrestore(&tape->lock, flags);
- tape->pipeline_head++;
- idetape_calculate_speeds(drive);
- }
- if (bytes_read > blocks * tape->blk_size) {
- printk(KERN_ERR "ide-tape: bug: trying to return more bytes"
- " than requested\n");
- bytes_read = blocks * tape->blk_size;
- }
- return (bytes_read);
+ return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
+ tape->merge_bh);
}
static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
@@ -2537,8 +1892,8 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
while (bcount) {
unsigned int count;
- bh = tape->merge_stage->bh;
- count = min(tape->stage_size, bcount);
+ bh = tape->merge_bh;
+ count = min(tape->buffer_size, bcount);
bcount -= count;
blocks = count / tape->blk_size;
while (count) {
@@ -2549,31 +1904,10 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
bh = bh->b_reqnext;
}
idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
- tape->merge_stage->bh);
+ tape->merge_bh);
}
}
-static int idetape_pipeline_size(ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- idetape_stage_t *stage;
- struct request *rq;
- int size = 0;
-
- idetape_wait_for_pipeline(drive);
- stage = tape->first_stage;
- while (stage != NULL) {
- rq = &stage->rq;
- size += tape->blk_size * (rq->nr_sectors -
- rq->current_nr_sectors);
- if (rq->errors == IDETAPE_ERROR_FILEMARK)
- size += tape->blk_size;
- stage = stage->next;
- }
- size += tape->merge_stage_size;
- return size;
-}
-
/*
* Rewinds the tape to the Beginning Of the current Partition (BOP). We
* currently support only one partition.
@@ -2619,11 +1953,10 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
if (copy_from_user(&config, argp, sizeof(config)))
return -EFAULT;
tape->best_dsc_rw_freq = config.dsc_rw_frequency;
- tape->max_stages = config.nr_stages;
break;
case 0x0350:
config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
- config.nr_stages = tape->max_stages;
+ config.nr_stages = 1;
if (copy_to_user(argp, &config, sizeof(config)))
return -EFAULT;
break;
@@ -2633,19 +1966,11 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
return 0;
}
-/*
- * The function below is now a bit more complicated than just passing the
- * command to the tape since we may have crossed some filemarks during our
- * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
- * support MTFSFM when the filemark is in our internal pipeline even if the tape
- * doesn't support spacing over filemarks in the reverse direction.
- */
static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
int mt_count)
{
idetape_tape_t *tape = drive->driver_data;
struct ide_atapi_pc pc;
- unsigned long flags;
int retval, count = 0;
int sprev = !!(tape->caps[4] & 0x20);
@@ -2658,48 +1983,12 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
}
if (tape->chrdev_dir == IDETAPE_DIR_READ) {
- /* its a read-ahead buffer, scan it for crossed filemarks. */
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
++count;
- while (tape->first_stage != NULL) {
- if (count == mt_count) {
- if (mt_op == MTFSFM)
- set_bit(IDETAPE_FLAG_FILEMARK,
- &tape->flags);
- return 0;
- }
- spin_lock_irqsave(&tape->lock, flags);
- if (tape->first_stage == tape->active_stage) {
- /*
- * We have reached the active stage in the read
- * pipeline. There is no point in allowing the
- * drive to continue reading any farther, so we
- * stop the pipeline.
- *
- * This section should be moved to a separate
- * subroutine because similar operations are
- * done in __idetape_discard_read_pipeline(),
- * for example.
- */
- tape->next_stage = NULL;
- spin_unlock_irqrestore(&tape->lock, flags);
- idetape_wait_first_stage(drive);
- tape->next_stage = tape->first_stage->next;
- } else
- spin_unlock_irqrestore(&tape->lock, flags);
- if (tape->first_stage->rq.errors ==
- IDETAPE_ERROR_FILEMARK)
- ++count;
- idetape_remove_stage_head(drive);
- }
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
}
- /*
- * The filemark was not found in our internal pipeline; now we can issue
- * the space command.
- */
switch (mt_op) {
case MTFSF:
case MTBSF:
@@ -2755,27 +2044,25 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
(count % tape->blk_size) == 0)
tape->user_bs_factor = count / tape->blk_size;
}
- rc = idetape_init_read(drive, tape->max_stages);
+ rc = idetape_init_read(drive);
if (rc < 0)
return rc;
if (count == 0)
return (0);
- if (tape->merge_stage_size) {
- actually_read = min((unsigned int)(tape->merge_stage_size),
+ if (tape->merge_bh_size) {
+ actually_read = min((unsigned int)(tape->merge_bh_size),
(unsigned int)count);
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
- actually_read))
+ if (idetape_copy_stage_to_user(tape, buf, actually_read))
ret = -EFAULT;
buf += actually_read;
- tape->merge_stage_size -= actually_read;
+ tape->merge_bh_size -= actually_read;
count -= actually_read;
}
- while (count >= tape->stage_size) {
+ while (count >= tape->buffer_size) {
bytes_read = idetape_add_chrdev_read_request(drive, ctl);
if (bytes_read <= 0)
goto finish;
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
- bytes_read))
+ if (idetape_copy_stage_to_user(tape, buf, bytes_read))
ret = -EFAULT;
buf += bytes_read;
count -= bytes_read;
@@ -2786,11 +2073,10 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
if (bytes_read <= 0)
goto finish;
temp = min((unsigned long)count, (unsigned long)bytes_read);
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
- temp))
+ if (idetape_copy_stage_to_user(tape, buf, temp))
ret = -EFAULT;
actually_read += temp;
- tape->merge_stage_size = bytes_read-temp;
+ tape->merge_bh_size = bytes_read-temp;
}
finish:
if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
@@ -2821,17 +2107,17 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
/* Initialize write operation */
if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
if (tape->chrdev_dir == IDETAPE_DIR_READ)
- idetape_discard_read_pipeline(drive, 1);
- if (tape->merge_stage || tape->merge_stage_size) {
- printk(KERN_ERR "ide-tape: merge_stage_size "
+ ide_tape_discard_merge_buffer(drive, 1);
+ if (tape->merge_bh || tape->merge_bh_size) {
+ printk(KERN_ERR "ide-tape: merge_bh_size "
"should be 0 now\n");
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
}
- tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
- if (!tape->merge_stage)
+ tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
+ if (!tape->merge_bh)
return -ENOMEM;
tape->chrdev_dir = IDETAPE_DIR_WRITE;
- idetape_init_merge_stage(tape);
+ idetape_init_merge_buffer(tape);
/*
* Issue a write 0 command to ensure that DSC handshake is
@@ -2842,10 +2128,10 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
if (drive->dsc_overlap) {
ssize_t retval = idetape_queue_rw_tail(drive,
REQ_IDETAPE_WRITE, 0,
- tape->merge_stage->bh);
+ tape->merge_bh);
if (retval < 0) {
- __idetape_kfree_stage(tape->merge_stage);
- tape->merge_stage = NULL;
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
tape->chrdev_dir = IDETAPE_DIR_NONE;
return retval;
}
@@ -2853,49 +2139,44 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
}
if (count == 0)
return (0);
- if (tape->restart_speed_control_req)
- idetape_restart_speed_control(drive);
- if (tape->merge_stage_size) {
- if (tape->merge_stage_size >= tape->stage_size) {
+ if (tape->merge_bh_size) {
+ if (tape->merge_bh_size >= tape->buffer_size) {
printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
}
actually_written = min((unsigned int)
- (tape->stage_size - tape->merge_stage_size),
+ (tape->buffer_size - tape->merge_bh_size),
(unsigned int)count);
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
- actually_written))
+ if (idetape_copy_stage_from_user(tape, buf, actually_written))
ret = -EFAULT;
buf += actually_written;
- tape->merge_stage_size += actually_written;
+ tape->merge_bh_size += actually_written;
count -= actually_written;
- if (tape->merge_stage_size == tape->stage_size) {
+ if (tape->merge_bh_size == tape->buffer_size) {
ssize_t retval;
- tape->merge_stage_size = 0;
+ tape->merge_bh_size = 0;
retval = idetape_add_chrdev_write_request(drive, ctl);
if (retval <= 0)
return (retval);
}
}
- while (count >= tape->stage_size) {
+ while (count >= tape->buffer_size) {
ssize_t retval;
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
- tape->stage_size))
+ if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
ret = -EFAULT;
- buf += tape->stage_size;
- count -= tape->stage_size;
+ buf += tape->buffer_size;
+ count -= tape->buffer_size;
retval = idetape_add_chrdev_write_request(drive, ctl);
- actually_written += tape->stage_size;
+ actually_written += tape->buffer_size;
if (retval <= 0)
return (retval);
}
if (count) {
actually_written += count;
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
- count))
+ if (idetape_copy_stage_from_user(tape, buf, count))
ret = -EFAULT;
- tape->merge_stage_size += count;
+ tape->merge_bh_size += count;
}
return ret ? ret : actually_written;
}
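Both character-device paths above share one staging pattern: top up a partially filled merge buffer, push every full buffer out as a single tape request, and hold any tail back for the next call. A self-contained sketch of that pattern (plain C; struct stage and submit() are illustrative stand-ins for the tape object and idetape_add_chrdev_write_request(), not driver API):

#include <stddef.h>
#include <string.h>

struct stage {
	char	buf[32768];	/* staging area (tape->buffer_size)      */
	size_t	used;		/* bytes held back (tape->merge_bh_size) */
};

static size_t stage_write(struct stage *s, const char *ubuf, size_t count,
			  void (*submit)(const char *, size_t))
{
	size_t done = 0;

	if (s->used) {			/* top up a partial buffer first */
		size_t n = sizeof(s->buf) - s->used;

		if (n > count)
			n = count;
		memcpy(s->buf + s->used, ubuf, n);
		s->used += n; ubuf += n; count -= n; done += n;
		if (s->used == sizeof(s->buf)) {
			submit(s->buf, s->used);
			s->used = 0;
		}
	}
	while (count >= sizeof(s->buf)) {  /* whole buffers go straight out */
		memcpy(s->buf, ubuf, sizeof(s->buf));
		submit(s->buf, sizeof(s->buf));
		ubuf += sizeof(s->buf); count -= sizeof(s->buf);
		done += sizeof(s->buf);
	}
	if (count) {			/* hold the tail for the next call */
		memcpy(s->buf, ubuf, count);
		s->used = count; done += count;
	}
	return done;
}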
@@ -2919,8 +2200,7 @@ static int idetape_write_filemark(ide_drive_t *drive)
*
* Note: MTBSF and MTBSFM are not supported when the tape doesn't support
* spacing over filemarks in the reverse direction. In this case, MTFSFM is also
- * usually not supported (it is supported in the rare case in which we crossed
- * the filemark during our read-ahead pipelined operation mode).
+ * usually not supported.
*
* The following commands are currently not supported:
*
@@ -2936,7 +2216,6 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
mt_op, mt_count);
- /* Commands which need our pipelined read-ahead stages. */
switch (mt_op) {
case MTFSF:
case MTFSFM:
@@ -2953,7 +2232,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
case MTWEOF:
if (tape->write_prot)
return -EACCES;
- idetape_discard_read_pipeline(drive, 1);
+ ide_tape_discard_merge_buffer(drive, 1);
for (i = 0; i < mt_count; i++) {
retval = idetape_write_filemark(drive);
if (retval)
@@ -2961,12 +2240,12 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
}
return 0;
case MTREW:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
if (idetape_rewind_tape(drive))
return -EIO;
return 0;
case MTLOAD:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
idetape_create_load_unload_cmd(drive, &pc,
IDETAPE_LU_LOAD_MASK);
return idetape_queue_pc_tail(drive, &pc);
@@ -2981,7 +2260,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
if (!idetape_queue_pc_tail(drive, &pc))
tape->door_locked = DOOR_UNLOCKED;
}
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
idetape_create_load_unload_cmd(drive, &pc,
!IDETAPE_LU_LOAD_MASK);
retval = idetape_queue_pc_tail(drive, &pc);
@@ -2989,10 +2268,10 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
return retval;
case MTNOP:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
return idetape_flush_tape_buffers(drive);
case MTRETEN:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
idetape_create_load_unload_cmd(drive, &pc,
IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
return idetape_queue_pc_tail(drive, &pc);
@@ -3014,11 +2293,11 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
return 0;
case MTSEEK:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
return idetape_position_tape(drive,
mt_count * tape->user_bs_factor, tape->partition, 0);
case MTSETPART:
- idetape_discard_read_pipeline(drive, 0);
+ ide_tape_discard_merge_buffer(drive, 0);
return idetape_position_tape(drive, 0, mt_count, 0);
case MTFSR:
case MTBSR:
@@ -3063,13 +2342,12 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
- tape->restart_speed_control_req = 1;
if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
- idetape_empty_write_pipeline(drive);
+ ide_tape_flush_merge_buffer(drive);
idetape_flush_tape_buffers(drive);
}
if (cmd == MTIOCGET || cmd == MTIOCPOS) {
- block_offset = idetape_pipeline_size(drive) /
+ block_offset = tape->merge_bh_size /
(tape->blk_size * tape->user_bs_factor);
position = idetape_read_position(drive);
if (position < 0)
@@ -3101,7 +2379,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
return 0;
default:
if (tape->chrdev_dir == IDETAPE_DIR_READ)
- idetape_discard_read_pipeline(drive, 1);
+ ide_tape_discard_merge_buffer(drive, 1);
return idetape_blkdev_ioctl(drive, cmd, arg);
}
}
@@ -3175,9 +2453,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
(void)idetape_rewind_tape(drive);
- if (tape->chrdev_dir != IDETAPE_DIR_READ)
- clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
-
/* Read block size and write protect status from drive. */
ide_tape_get_bsize_from_bdesc(drive);
@@ -3206,8 +2481,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
}
}
}
- idetape_restart_speed_control(drive);
- tape->restart_speed_control_req = 0;
return 0;
out_put_tape:
@@ -3219,13 +2492,13 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
{
idetape_tape_t *tape = drive->driver_data;
- idetape_empty_write_pipeline(drive);
- tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
- if (tape->merge_stage != NULL) {
+ ide_tape_flush_merge_buffer(drive);
+ tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0);
+ if (tape->merge_bh != NULL) {
idetape_pad_zeros(drive, tape->blk_size *
(tape->user_bs_factor - 1));
- __idetape_kfree_stage(tape->merge_stage);
- tape->merge_stage = NULL;
+ ide_tape_kfree_buffer(tape);
+ tape->merge_bh = NULL;
}
idetape_write_filemark(drive);
idetape_flush_tape_buffers(drive);
@@ -3248,14 +2521,9 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
idetape_write_release(drive, minor);
if (tape->chrdev_dir == IDETAPE_DIR_READ) {
if (minor < 128)
- idetape_discard_read_pipeline(drive, 1);
- else
- idetape_wait_for_pipeline(drive);
- }
- if (tape->cache_stage != NULL) {
- __idetape_kfree_stage(tape->cache_stage);
- tape->cache_stage = NULL;
+ ide_tape_discard_merge_buffer(drive, 1);
}
+
if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
(void) idetape_rewind_tape(drive);
if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
@@ -3392,33 +2660,15 @@ static void idetape_add_settings(ide_drive_t *drive)
ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
1, 2, (u16 *)&tape->caps[16], NULL);
- ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
- tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
- ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
- tape->stage_size / 1024, 1, &tape->max_stages, NULL);
- ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
- tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
- ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
- 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
- NULL);
- ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
- 0xffff, tape->stage_size / 1024, 1,
- &tape->nr_pending_stages, NULL);
ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
1, 1, (u16 *)&tape->caps[14], NULL);
- ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
- 1024, &tape->stage_size, NULL);
+ ide_add_setting(drive, "buffer_size", SETTING_READ, TYPE_INT, 0, 0xffff,
+ 1, 1024, &tape->buffer_size, NULL);
ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
NULL);
ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
1, &drive->dsc_overlap, NULL);
- ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT,
- 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed,
- NULL);
- ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT,
- 0, 0xffff, 1, 1,
- &tape->uncontrolled_pipeline_head_speed, NULL);
ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
1, 1, &tape->avg_speed, NULL);
ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
@@ -3441,11 +2691,10 @@ static inline void idetape_add_settings(ide_drive_t *drive) { ; }
*/
static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
{
- unsigned long t1, tmid, tn, t;
+ unsigned long t;
int speed;
- int stage_size;
+ int buffer_size;
u8 gcw[2];
- struct sysinfo si;
u16 *ctl = (u16 *)&tape->caps[12];
spin_lock_init(&tape->lock);
@@ -3464,65 +2713,33 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
tape->name[2] = '0' + minor;
tape->chrdev_dir = IDETAPE_DIR_NONE;
tape->pc = tape->pc_stack;
- tape->max_insert_speed = 10000;
- tape->speed_control = 1;
*((unsigned short *) &gcw) = drive->id->config;
/* Command packet DRQ type */
if (((gcw[0] & 0x60) >> 5) == 1)
set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
- tape->min_pipeline = 10;
- tape->max_pipeline = 10;
- tape->max_stages = 10;
-
idetape_get_inquiry_results(drive);
idetape_get_mode_sense_results(drive);
ide_tape_get_bsize_from_bdesc(drive);
tape->user_bs_factor = 1;
- tape->stage_size = *ctl * tape->blk_size;
- while (tape->stage_size > 0xffff) {
+ tape->buffer_size = *ctl * tape->blk_size;
+ while (tape->buffer_size > 0xffff) {
printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
*ctl /= 2;
- tape->stage_size = *ctl * tape->blk_size;
+ tape->buffer_size = *ctl * tape->blk_size;
}
- stage_size = tape->stage_size;
- tape->pages_per_stage = stage_size / PAGE_SIZE;
- if (stage_size % PAGE_SIZE) {
- tape->pages_per_stage++;
- tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
+ buffer_size = tape->buffer_size;
+ tape->pages_per_buffer = buffer_size / PAGE_SIZE;
+ if (buffer_size % PAGE_SIZE) {
+ tape->pages_per_buffer++;
+ tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
}
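Worked example, assuming 4 KiB pages: with blk_size = 512 and *ctl = 52, buffer_size = 26624 bytes. That is 6 full pages plus a 2048-byte remainder, so pages_per_buffer becomes 7 and excess_bh_size = 4096 - 2048 = 2048, the slack at the end of the last page that the bh size accounting subtracts again after allocation.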
- /* Select the "best" DSC read/write polling freq and pipeline size. */
+ /* select the "best" DSC read/write polling freq */
speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
- tape->max_stages = speed * 1000 * 10 / tape->stage_size;
-
- /* Limit memory use for pipeline to 10% of physical memory */
- si_meminfo(&si);
- if (tape->max_stages * tape->stage_size >
- si.totalram * si.mem_unit / 10)
- tape->max_stages =
- si.totalram * si.mem_unit / (10 * tape->stage_size);
-
- tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
- tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
- tape->max_pipeline =
- min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
- if (tape->max_stages == 0) {
- tape->max_stages = 1;
- tape->min_pipeline = 1;
- tape->max_pipeline = 1;
- }
-
- t1 = (tape->stage_size * HZ) / (speed * 1000);
- tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
- tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);
-
- if (tape->max_stages)
- t = tn;
- else
- t = t1;
+ t = (IDETAPE_FIFO_THRESHOLD * tape->buffer_size * HZ) / (speed * 1000);
/*
* Ensure that the number we got makes sense; limit it within
@@ -3532,11 +2749,10 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
IDETAPE_DSC_RW_MIN);
printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
- "%dkB pipeline, %lums tDSC%s\n",
+ "%lums tDSC%s\n",
drive->name, tape->name, *(u16 *)&tape->caps[14],
- (*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
- tape->stage_size / 1024,
- tape->max_stages * tape->stage_size / 1024,
+ (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
+ tape->buffer_size / 1024,
tape->best_dsc_rw_freq * 1000 / HZ,
drive->using_dma ? ", DMA":"");
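Worked example for the tDSC formula above, assuming IDETAPE_FIFO_THRESHOLD is 2: a drive reporting 600 KB/s with a 32768-byte buffer yields t = 2 * 32768 * HZ / (600 * 1000) ≈ 0.109 * HZ jiffies, i.e. one DSC poll roughly every 109 ms (about the time the drive needs to move two buffers' worth of data) before the IDETAPE_DSC_RW_{MIN,MAX} clamp is applied.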
@@ -3560,7 +2776,7 @@ static void ide_tape_release(struct kref *kref)
ide_drive_t *drive = tape->drive;
struct gendisk *g = tape->disk;
- BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);
+ BUG_ON(tape->merge_bh_size);
drive->dsc_overlap = 0;
drive->driver_data = NULL;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 155cc904f4e..9f9ad9fb6b8 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -36,6 +36,7 @@
void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
{
ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
struct ide_taskfile *tf = &task->tf;
u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
@@ -59,34 +60,33 @@ void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
SELECT_MASK(drive, 0);
if (task->tf_flags & IDE_TFLAG_OUT_DATA)
- hwif->OUTW((tf->hob_data << 8) | tf->data,
- hwif->io_ports[IDE_DATA_OFFSET]);
+ hwif->OUTW((tf->hob_data << 8) | tf->data, io_ports->data_addr);
if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
- hwif->OUTB(tf->hob_feature, hwif->io_ports[IDE_FEATURE_OFFSET]);
+ hwif->OUTB(tf->hob_feature, io_ports->feature_addr);
if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
- hwif->OUTB(tf->hob_nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]);
+ hwif->OUTB(tf->hob_nsect, io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
- hwif->OUTB(tf->hob_lbal, hwif->io_ports[IDE_SECTOR_OFFSET]);
+ hwif->OUTB(tf->hob_lbal, io_ports->lbal_addr);
if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
- hwif->OUTB(tf->hob_lbam, hwif->io_ports[IDE_LCYL_OFFSET]);
+ hwif->OUTB(tf->hob_lbam, io_ports->lbam_addr);
if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
- hwif->OUTB(tf->hob_lbah, hwif->io_ports[IDE_HCYL_OFFSET]);
+ hwif->OUTB(tf->hob_lbah, io_ports->lbah_addr);
if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
- hwif->OUTB(tf->feature, hwif->io_ports[IDE_FEATURE_OFFSET]);
+ hwif->OUTB(tf->feature, io_ports->feature_addr);
if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
- hwif->OUTB(tf->nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]);
+ hwif->OUTB(tf->nsect, io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
- hwif->OUTB(tf->lbal, hwif->io_ports[IDE_SECTOR_OFFSET]);
+ hwif->OUTB(tf->lbal, io_ports->lbal_addr);
if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
- hwif->OUTB(tf->lbam, hwif->io_ports[IDE_LCYL_OFFSET]);
+ hwif->OUTB(tf->lbam, io_ports->lbam_addr);
if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
- hwif->OUTB(tf->lbah, hwif->io_ports[IDE_HCYL_OFFSET]);
+ hwif->OUTB(tf->lbah, io_ports->lbah_addr);
if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
hwif->OUTB((tf->device & HIHI) | drive->select.all,
- hwif->io_ports[IDE_SELECT_OFFSET]);
+ io_ports->device_addr);
}
int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
@@ -135,6 +135,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
ide_hwif_t *hwif = HWIF(drive);
struct ide_taskfile *tf = &task->tf;
ide_handler_t *handler = NULL;
+ const struct ide_dma_ops *dma_ops = hwif->dma_ops;
if (task->data_phase == TASKFILE_MULTI_IN ||
task->data_phase == TASKFILE_MULTI_OUT) {
@@ -154,8 +155,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
switch (task->data_phase) {
case TASKFILE_MULTI_OUT:
case TASKFILE_OUT:
- hwif->OUTBSYNC(drive, tf->command,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTBSYNC(drive, tf->command, hwif->io_ports.command_addr);
ndelay(400); /* FIXME */
return pre_task_out_intr(drive, task->rq);
case TASKFILE_MULTI_IN:
@@ -178,10 +178,10 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
return ide_started;
default:
if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
- hwif->dma_setup(drive))
+ dma_ops->dma_setup(drive))
return ide_stopped;
- hwif->dma_exec_cmd(drive, tf->command);
- hwif->dma_start(drive);
+ dma_ops->dma_exec_cmd(drive, tf->command);
+ dma_ops->dma_start(drive);
return ide_started;
}
}
@@ -455,7 +455,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
/* Error? */
if (stat & ERR_STAT)
- return task_error(drive, rq, __FUNCTION__, stat);
+ return task_error(drive, rq, __func__, stat);
/* Didn't want any data? Odd. */
if (!(stat & DRQ_STAT))
@@ -467,7 +467,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
if (!hwif->nleft) {
stat = wait_drive_not_busy(drive);
if (!OK_STAT(stat, 0, BAD_STAT))
- return task_error(drive, rq, __FUNCTION__, stat);
+ return task_error(drive, rq, __func__, stat);
task_end_request(drive, rq, stat);
return ide_stopped;
}
@@ -488,11 +488,11 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
u8 stat = ide_read_status(drive);
if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
- return task_error(drive, rq, __FUNCTION__, stat);
+ return task_error(drive, rq, __func__, stat);
/* Deal with unexpected ATA data phase. */
if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
- return task_error(drive, rq, __FUNCTION__, stat);
+ return task_error(drive, rq, __func__, stat);
if (!hwif->nleft) {
task_end_request(drive, rq, stat);
@@ -675,7 +675,7 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
/* (hs): give up if multcount is not set */
printk(KERN_ERR "%s: %s Multimode Write " \
"multcount is not set\n",
- drive->name, __FUNCTION__);
+ drive->name, __func__);
err = -EPERM;
goto abort;
}
@@ -692,7 +692,7 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
/* (hs): give up if multcount is not set */
printk(KERN_ERR "%s: %s Multimode Read failure " \
"multcount is not set\n",
- drive->name, __FUNCTION__);
+ drive->name, __func__);
err = -EPERM;
goto abort;
}
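The ide-taskfile.c hunks above replace indexed io_ports[IDE_*_OFFSET] accesses with named fields such as io_ports->nsect_addr. A plausible way to keep the legacy array view and the new named view interchangeable, and roughly how the kernel lays it out here (simplified: the real struct also pairs error/feature and status/command as unions), is a union of a struct and an array:

#include <assert.h>

#define IDE_NR_PORTS 10

struct ide_io_ports {
	unsigned long data_addr;	/* was io_ports[IDE_DATA_OFFSET]   */
	unsigned long error_addr;	/* read: error, write: feature     */
	unsigned long nsect_addr;
	unsigned long lbal_addr;
	unsigned long lbam_addr;
	unsigned long lbah_addr;
	unsigned long device_addr;	/* was io_ports[IDE_SELECT_OFFSET] */
	unsigned long command_addr;	/* read: status, write: command    */
	unsigned long ctl_addr;
	unsigned long irq_addr;
};

union hw_ports {
	struct ide_io_ports io_ports;
	unsigned long io_ports_array[IDE_NR_PORTS];
};

int main(void)
{
	union hw_ports hw = { .io_ports_array = { 0x1f0 } };
	int i;

	/* legacy setup loops can still fill ports by index... */
	for (i = 1; i < 8; i++)
		hw.io_ports_array[i] = 0x1f0 + i;
	hw.io_ports.ctl_addr = 0x3f6;

	/* ...while converted code reads the named aliases */
	assert(hw.io_ports.data_addr == 0x1f0);
	assert(hw.io_ports.lbal_addr == 0x1f3);
	return 0;
}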
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 917c72dcd33..999584c03d9 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -94,19 +94,8 @@ DEFINE_MUTEX(ide_cfg_mtx);
int noautodma = 0;
-#ifdef CONFIG_BLK_DEV_IDEACPI
-int ide_noacpi = 0;
-int ide_noacpitfs = 1;
-int ide_noacpionboot = 1;
-#endif
-
-/*
- * This is declared extern in ide.h, for access by other IDE modules:
- */
ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
-EXPORT_SYMBOL(ide_hwifs);
-
static void ide_port_init_devices_data(ide_hwif_t *);
/*
@@ -232,117 +221,6 @@ static int ide_system_bus_speed(void)
return pci_dev_present(pci_default) ? 33 : 50;
}
-ide_hwif_t * ide_find_port(unsigned long base)
-{
- ide_hwif_t *hwif;
- int i;
-
- for (i = 0; i < MAX_HWIFS; i++) {
- hwif = &ide_hwifs[i];
- if (hwif->io_ports[IDE_DATA_OFFSET] == base)
- goto found;
- }
-
- for (i = 0; i < MAX_HWIFS; i++) {
- hwif = &ide_hwifs[i];
- if (hwif->chipset == ide_unknown)
- goto found;
- }
-
- hwif = NULL;
-found:
- return hwif;
-}
-
-EXPORT_SYMBOL_GPL(ide_find_port);
-
-static struct resource* hwif_request_region(ide_hwif_t *hwif,
- unsigned long addr, int num)
-{
- struct resource *res = request_region(addr, num, hwif->name);
-
- if (!res)
- printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
- hwif->name, addr, addr+num-1);
- return res;
-}
-
-/**
- * ide_hwif_request_regions - request resources for IDE
- * @hwif: interface to use
- *
- * Requests all the needed resources for an interface.
- * Right now core IDE code does this work which is deeply wrong.
- * MMIO leaves it to the controller driver,
- * PIO will migrate this way over time.
- */
-
-int ide_hwif_request_regions(ide_hwif_t *hwif)
-{
- unsigned long addr;
- unsigned int i;
-
- if (hwif->mmio)
- return 0;
- addr = hwif->io_ports[IDE_CONTROL_OFFSET];
- if (addr && !hwif_request_region(hwif, addr, 1))
- goto control_region_busy;
- hwif->straight8 = 0;
- addr = hwif->io_ports[IDE_DATA_OFFSET];
- if ((addr | 7) == hwif->io_ports[IDE_STATUS_OFFSET]) {
- if (!hwif_request_region(hwif, addr, 8))
- goto data_region_busy;
- hwif->straight8 = 1;
- return 0;
- }
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- addr = hwif->io_ports[i];
- if (!hwif_request_region(hwif, addr, 1)) {
- while (--i)
- release_region(addr, 1);
- goto data_region_busy;
- }
- }
- return 0;
-
-data_region_busy:
- addr = hwif->io_ports[IDE_CONTROL_OFFSET];
- if (addr)
- release_region(addr, 1);
-control_region_busy:
- /* If any errors are returned, we drop the hwif interface. */
- return -EBUSY;
-}
-
-/**
- * ide_hwif_release_regions - free IDE resources
- *
- * Note that we only release the standard ports,
- * and do not even try to handle any extra ports
- * allocated for weird IDE interface chipsets.
- *
- * Note also that we don't yet handle mmio resources here. More
- * importantly our caller should be doing this so we need to
- * restructure this as a helper function for drivers.
- */
-
-void ide_hwif_release_regions(ide_hwif_t *hwif)
-{
- u32 i = 0;
-
- if (hwif->mmio)
- return;
- if (hwif->io_ports[IDE_CONTROL_OFFSET])
- release_region(hwif->io_ports[IDE_CONTROL_OFFSET], 1);
- if (hwif->straight8) {
- release_region(hwif->io_ports[IDE_DATA_OFFSET], 8);
- return;
- }
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
- if (hwif->io_ports[i])
- release_region(hwif->io_ports[i], 1);
-}
-
void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
{
ide_hwgroup_t *hwgroup = hwif->hwgroup;
@@ -409,7 +287,7 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
/**
* ide_unregister - free an IDE interface
- * @index: index of interface (will change soon to a pointer)
+ * @hwif: IDE interface
*
* Perform the final unregister of an IDE interface. At the moment
* we don't refcount interfaces so this will also get split up.
@@ -429,19 +307,16 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
* This is raving bonkers.
*/
-void ide_unregister(unsigned int index)
+void ide_unregister(ide_hwif_t *hwif)
{
- ide_hwif_t *hwif, *g;
+ ide_hwif_t *g;
ide_hwgroup_t *hwgroup;
int irq_count = 0;
- BUG_ON(index >= MAX_HWIFS);
-
BUG_ON(in_interrupt());
BUG_ON(irqs_disabled());
mutex_lock(&ide_cfg_mtx);
spin_lock_irq(&ide_lock);
- hwif = &ide_hwifs[index];
if (!hwif->present)
goto abort;
__ide_port_unregister_devices(hwif);
@@ -479,12 +354,10 @@ void ide_unregister(unsigned int index)
spin_lock_irq(&ide_lock);
if (hwif->dma_base)
- (void)ide_release_dma(hwif);
-
- ide_hwif_release_regions(hwif);
+ ide_release_dma_engine(hwif);
/* restore hwif data to pristine status */
- ide_init_port_data(hwif, index);
+ ide_init_port_data(hwif, hwif->index);
abort:
spin_unlock_irq(&ide_lock);
@@ -495,9 +368,8 @@ EXPORT_SYMBOL(ide_unregister);
void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
{
- memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
+ memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
hwif->irq = hw->irq;
- hwif->noprobe = 0;
hwif->chipset = hw->chipset;
hwif->gendev.parent = hw->dev;
hwif->ack_intr = hw->ack_intr;
@@ -588,7 +460,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
if (!drive->id || !(drive->id->capability & 1))
goto out;
- if (hwif->dma_host_set == NULL)
+ if (hwif->dma_ops == NULL)
goto out;
err = -EBUSY;
@@ -627,11 +499,14 @@ out:
int set_pio_mode(ide_drive_t *drive, int arg)
{
struct request rq;
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
if (arg < 0 || arg > 255)
return -EINVAL;
- if (drive->hwif->set_pio_mode == NULL)
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
return -ENOSYS;
if (drive->special.b.set_tune)
@@ -953,16 +828,6 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m
return 0; /* zero = nothing matched */
}
-extern int probe_ali14xx;
-extern int probe_umc8672;
-extern int probe_dtc2278;
-extern int probe_ht6560b;
-extern int probe_qd65xx;
-extern int cmd640_vlb;
-extern int probe_4drives;
-
-static int __initdata is_chipset_set;
-
/*
* ide_setup() gets called VERY EARLY during initialization,
* to handle kernel "command line" strings beginning with "hdx=" or "ide".
@@ -971,14 +836,12 @@ static int __initdata is_chipset_set;
*/
static int __init ide_setup(char *s)
{
- int i, vals[3];
ide_hwif_t *hwif;
ide_drive_t *drive;
unsigned int hw, unit;
+ int vals[3];
const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1);
- const char max_hwif = '0' + (MAX_HWIFS - 1);
-
if (strncmp(s,"hd",2) == 0 && s[2] == '=') /* hd= is for hd.c */
return 0; /* driver and not us */
@@ -994,7 +857,7 @@ static int __init ide_setup(char *s)
printk(" : Enabled support for IDE doublers\n");
ide_doubler = 1;
- return 1;
+ goto obsolete_option;
}
#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
@@ -1008,17 +871,17 @@ static int __init ide_setup(char *s)
if (!strcmp(s, "ide=noacpi")) {
//printk(" : Disable IDE ACPI support.\n");
ide_noacpi = 1;
- return 1;
+ goto obsolete_option;
}
if (!strcmp(s, "ide=acpigtf")) {
//printk(" : Enable IDE ACPI _GTF support.\n");
- ide_noacpitfs = 0;
- return 1;
+ ide_acpigtf = 1;
+ goto obsolete_option;
}
if (!strcmp(s, "ide=acpionboot")) {
//printk(" : Call IDE ACPI methods on boot.\n");
- ide_noacpionboot = 0;
- return 1;
+ ide_acpionboot = 1;
+ goto obsolete_option;
}
#endif /* CONFIG_BLK_DEV_IDEACPI */
@@ -1028,7 +891,7 @@ static int __init ide_setup(char *s)
if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
const char *hd_words[] = {
"none", "noprobe", "nowerr", "cdrom", "nodma",
- "autotune", "noautotune", "-8", "-9", "-10",
+ "-6", "-7", "-8", "-9", "-10",
"noflush", "remap", "remap63", "scsi", NULL };
unit = s[2] - 'a';
hw = unit / MAX_DRIVES;
@@ -1043,30 +906,22 @@ static int __init ide_setup(char *s)
case -1: /* "none" */
case -2: /* "noprobe" */
drive->noprobe = 1;
- goto done;
+ goto obsolete_option;
case -3: /* "nowerr" */
drive->bad_wstat = BAD_R_STAT;
- hwif->noprobe = 0;
- goto done;
+ goto obsolete_option;
case -4: /* "cdrom" */
drive->present = 1;
drive->media = ide_cdrom;
/* an ATAPI device ignores DRDY */
drive->ready_stat = 0;
- hwif->noprobe = 0;
- goto done;
+ goto obsolete_option;
case -5: /* nodma */
drive->nodma = 1;
- goto done;
- case -6: /* "autotune" */
- drive->autotune = IDE_TUNE_AUTO;
- goto obsolete_option;
- case -7: /* "noautotune" */
- drive->autotune = IDE_TUNE_NOAUTO;
goto obsolete_option;
case -11: /* noflush */
drive->noflush = 1;
- goto done;
+ goto obsolete_option;
case -12: /* "remap" */
drive->remap_0_to_1 = 1;
goto obsolete_option;
@@ -1084,8 +939,7 @@ static int __init ide_setup(char *s)
drive->sect = drive->bios_sect = vals[2];
drive->present = 1;
drive->forced_geom = 1;
- hwif->noprobe = 0;
- goto done;
+ goto obsolete_option;
default:
goto bad_option;
}
@@ -1103,126 +957,15 @@ static int __init ide_setup(char *s)
idebus_parameter = vals[0];
} else
printk(" -- BAD BUS SPEED! Expected value from 20 to 66");
- goto done;
+ goto obsolete_option;
}
- /*
- * Look for interface options: "idex="
- */
- if (s[3] >= '0' && s[3] <= max_hwif) {
- /*
- * Be VERY CAREFUL changing this: note hardcoded indexes below
- * (-8, -9, -10) are reserved to ease the hardcoding.
- */
- static const char *ide_words[] = {
- "minus1", "serialize", "minus3", "minus4",
- "reset", "minus6", "ata66", "minus8", "minus9",
- "minus10", "four", "qd65xx", "ht6560b", "cmd640_vlb",
- "dtc2278", "umc8672", "ali14xx", NULL };
-
- hw = s[3] - '0';
- hwif = &ide_hwifs[hw];
- i = match_parm(&s[4], ide_words, vals, 3);
- /*
- * Cryptic check to ensure chipset not already set for hwif.
- * Note: we can't depend on hwif->chipset here.
- */
- if (i >= -18 && i <= -11) {
- /* chipset already specified */
- if (is_chipset_set)
- goto bad_option;
- /* these drivers are for "ide0=" only */
- if (hw != 0)
- goto bad_hwif;
- is_chipset_set = 1;
- printk("\n");
- }
-
- switch (i) {
-#ifdef CONFIG_BLK_DEV_ALI14XX
- case -17: /* "ali14xx" */
- probe_ali14xx = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_UMC8672
- case -16: /* "umc8672" */
- probe_umc8672 = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_DTC2278
- case -15: /* "dtc2278" */
- probe_dtc2278 = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_CMD640
- case -14: /* "cmd640_vlb" */
- cmd640_vlb = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_HT6560B
- case -13: /* "ht6560b" */
- probe_ht6560b = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_QD65XX
- case -12: /* "qd65xx" */
- probe_qd65xx = 1;
- goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_4DRIVES
- case -11: /* "four" drives on one set of ports */
- probe_4drives = 1;
- goto obsolete_option;
-#endif
- case -10: /* minus10 */
- case -9: /* minus9 */
- case -8: /* minus8 */
- case -6:
- case -4:
- case -3:
- goto bad_option;
- case -7: /* ata66 */
-#ifdef CONFIG_BLK_DEV_IDEPCI
- /*
- * Use ATA_CBL_PATA40_SHORT so drive side
- * cable detection is also overriden.
- */
- hwif->cbl = ATA_CBL_PATA40_SHORT;
- goto obsolete_option;
-#else
- goto bad_hwif;
-#endif
- case -5: /* "reset" */
- hwif->reset = 1;
- goto obsolete_option;
- case -2: /* "serialize" */
- hwif->mate = &ide_hwifs[hw^1];
- hwif->mate->mate = hwif;
- hwif->serialized = hwif->mate->serialized = 1;
- goto obsolete_option;
-
- case -1:
- case 0:
- case 1:
- case 2:
- case 3:
- goto bad_option;
- default:
- printk(" -- SUPPORT NOT CONFIGURED IN THIS KERNEL\n");
- return 1;
- }
- }
bad_option:
printk(" -- BAD OPTION\n");
return 1;
obsolete_option:
printk(" -- OBSOLETE OPTION, WILL BE REMOVED SOON!\n");
return 1;
-bad_hwif:
- printk("-- NOT SUPPORTED ON ide%d", hw);
-done:
- printk("\n");
- return 1;
}
EXPORT_SYMBOL(ide_lock);
@@ -1358,6 +1101,185 @@ static void ide_port_class_release(struct device *portdev)
put_device(&hwif->gendev);
}
+int ide_vlb_clk;
+EXPORT_SYMBOL_GPL(ide_vlb_clk);
+
+module_param_named(vlb_clock, ide_vlb_clk, int, 0);
+MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)");
+
+int ide_pci_clk;
+EXPORT_SYMBOL_GPL(ide_pci_clk);
+
+module_param_named(pci_clock, ide_pci_clk, int, 0);
+MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
+
+static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
+{
+ int a, b, i, j = 1;
+ unsigned int *dev_param_mask = (unsigned int *)kp->arg;
+
+ if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
+ sscanf(s, "%d.%d", &a, &b) != 2)
+ return -EINVAL;
+
+ i = a * MAX_DRIVES + b;
+
+ if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (j)
+ *dev_param_mask |= (1 << i);
+ else
+ *dev_param_mask &= ~(1 << i);
+
+ return 0;
+}
+
+static unsigned int ide_nodma;
+
+module_param_call(nodma, ide_set_dev_param_mask, NULL, &ide_nodma, 0);
+MODULE_PARM_DESC(nodma, "disallow DMA for a device");
+
+static unsigned int ide_noflush;
+
+module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
+MODULE_PARM_DESC(noflush, "disable flush requests for a device");
+
+static unsigned int ide_noprobe;
+
+module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
+MODULE_PARM_DESC(noprobe, "skip probing for a device");
+
+static unsigned int ide_nowerr;
+
+module_param_call(nowerr, ide_set_dev_param_mask, NULL, &ide_nowerr, 0);
+MODULE_PARM_DESC(nowerr, "ignore the WRERR_STAT bit for a device");
+
+static unsigned int ide_cdroms;
+
+module_param_call(cdrom, ide_set_dev_param_mask, NULL, &ide_cdroms, 0);
+MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
+
+struct chs_geom {
+ unsigned int cyl;
+ u8 head;
+ u8 sect;
+};
+
+static unsigned int ide_disks;
+static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
+
+static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
+{
+ int a, b, c = 0, h = 0, s = 0, i, j = 1;
+
+ if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 &&
+ sscanf(str, "%d.%d:%d", &a, &b, &j) != 3)
+ return -EINVAL;
+
+ i = a * MAX_DRIVES + b;
+
+ if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (c > INT_MAX || h > 255 || s > 255)
+ return -EINVAL;
+
+ if (j)
+ ide_disks |= (1 << i);
+ else
+ ide_disks &= ~(1 << i);
+
+ ide_disks_chs[i].cyl = c;
+ ide_disks_chs[i].head = h;
+ ide_disks_chs[i].sect = s;
+
+ return 0;
+}
+
+module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0);
+MODULE_PARM_DESC(chs, "force device as a disk (using CHS)");
+
+static void ide_dev_apply_params(ide_drive_t *drive)
+{
+ int i = drive->hwif->index * MAX_DRIVES + drive->select.b.unit;
+
+ if (ide_nodma & (1 << i)) {
+ printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name);
+ drive->nodma = 1;
+ }
+ if (ide_noflush & (1 << i)) {
+ printk(KERN_INFO "ide: disabling flush requests for %s\n",
+ drive->name);
+ drive->noflush = 1;
+ }
+ if (ide_noprobe & (1 << i)) {
+ printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
+ drive->noprobe = 1;
+ }
+ if (ide_nowerr & (1 << i)) {
+ printk(KERN_INFO "ide: ignoring the WRERR_STAT bit for %s\n",
+ drive->name);
+ drive->bad_wstat = BAD_R_STAT;
+ }
+ if (ide_cdroms & (1 << i)) {
+ printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name);
+ drive->present = 1;
+ drive->media = ide_cdrom;
+ /* an ATAPI device ignores DRDY */
+ drive->ready_stat = 0;
+ }
+ if (ide_disks & (1 << i)) {
+ drive->cyl = drive->bios_cyl = ide_disks_chs[i].cyl;
+ drive->head = drive->bios_head = ide_disks_chs[i].head;
+ drive->sect = drive->bios_sect = ide_disks_chs[i].sect;
+ drive->forced_geom = 1;
+ printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n",
+ drive->name,
+ drive->cyl, drive->head, drive->sect);
+ drive->present = 1;
+ drive->media = ide_disk;
+ drive->ready_stat = READY_STAT;
+ }
+}
+
+static unsigned int ide_ignore_cable;
+
+static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
+{
+ int i, j = 1;
+
+ if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
+ return -EINVAL;
+
+ if (i >= MAX_HWIFS || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (j)
+ ide_ignore_cable |= (1 << i);
+ else
+ ide_ignore_cable &= ~(1 << i);
+
+ return 0;
+}
+
+module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0);
+MODULE_PARM_DESC(ignore_cable, "ignore cable detection");
+
+void ide_port_apply_params(ide_hwif_t *hwif)
+{
+ int i;
+
+ if (ide_ignore_cable & (1 << hwif->index)) {
+ printk(KERN_INFO "ide: ignoring cable detection for %s\n",
+ hwif->name);
+ hwif->cbl = ATA_CBL_PATA40_SHORT;
+ }
+
+ for (i = 0; i < MAX_DRIVES; i++)
+ ide_dev_apply_params(&hwif->drives[i]);
+}
+
/*
* This gets invoked once during initialization, to set *everything* up
*/
@@ -1424,11 +1346,6 @@ int __init init_module (void)
void __exit cleanup_module (void)
{
- int index;
-
- for (index = 0; index < MAX_HWIFS; ++index)
- ide_unregister(index);
-
proc_ide_destroy();
class_destroy(ide_port_class);
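The new module parameters above (nodma, noflush, noprobe, nowerr, cdrom) all funnel through ide_set_dev_param_mask(), which parses "<hwif>.<drive>[:<0|1>]" and sets or clears one bit per device; note that clearing a bit requires the complement, &= ~(1 << i). A self-contained sketch of that parsing, with MAX_HWIFS/MAX_DRIVES chosen only for illustration:

#include <stdio.h>

#define MAX_HWIFS	10
#define MAX_DRIVES	2

/* parse "<hwif>.<drive>[:<0|1>]" and set/clear that device's bit */
static int set_dev_param_mask(const char *s, unsigned int *mask)
{
	int a, b, i, j = 1;

	if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
	    sscanf(s, "%d.%d", &a, &b) != 2)
		return -1;

	i = a * MAX_DRIVES + b;
	if (i < 0 || i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
		return -1;

	if (j)
		*mask |= 1u << i;
	else
		*mask &= ~(1u << i);	/* the ~ is what actually clears it */
	return 0;
}

int main(void)
{
	unsigned int nodma = 0;

	set_dev_param_mask("1.0", &nodma);	/* hdc: disallow DMA */
	set_dev_param_mask("0.1", &nodma);	/* hdb: disallow DMA */
	set_dev_param_mask("0.1:0", &nodma);	/* hdb: allow it again */
	printf("nodma mask = %#x\n", nodma);	/* 0x4: only hdc left */
	return 0;
}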
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
index bc8b1f8de61..90c65cf9744 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/legacy/ali14xx.c
@@ -49,6 +49,8 @@
#include <asm/io.h>
+#define DRV_NAME "ali14xx"
+
/* port addresses for auto-detection */
#define ALI_NUM_PORTS 4
static const int ports[ALI_NUM_PORTS] __initdata =
@@ -86,7 +88,7 @@ static u8 regOff; /* output to base port to close registers */
/*
* Read a controller register.
*/
-static inline u8 inReg (u8 reg)
+static inline u8 inReg(u8 reg)
{
outb_p(reg, regPort);
return inb(dataPort);
@@ -95,7 +97,7 @@ static inline u8 inReg (u8 reg)
/*
* Write a controller register.
*/
-static void outReg (u8 data, u8 reg)
+static void outReg(u8 data, u8 reg)
{
outb_p(reg, regPort);
outb_p(data, dataPort);
@@ -114,7 +116,7 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
int time1, time2;
u8 param1, param2, param3, param4;
unsigned long flags;
- int bus_speed = system_bus_clock();
+ int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
/* calculate timing, according to PIO mode */
time1 = ide_pio_cycle_time(drive, pio);
@@ -143,7 +145,7 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
/*
* Auto-detect the IDE controller port.
*/
-static int __init findPort (void)
+static int __init findPort(void)
{
int i;
u8 t;
@@ -175,7 +177,8 @@ static int __init findPort (void)
/*
* Initialize controller registers with default values.
*/
-static int __init initRegisters (void) {
+static int __init initRegisters(void)
+{
const RegInitializer *p;
u8 t;
unsigned long flags;
@@ -191,17 +194,20 @@ static int __init initRegisters (void) {
return t;
}
+static const struct ide_port_ops ali14xx_port_ops = {
+ .set_pio_mode = ali14xx_set_pio_mode,
+};
+
static const struct ide_port_info ali14xx_port_info = {
+ .name = DRV_NAME,
.chipset = ide_ali14xx,
- .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE,
+ .port_ops = &ali14xx_port_ops,
+ .host_flags = IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
};
static int __init ali14xx_probe(void)
{
- static u8 idx[4] = { 0, 1, 0xff, 0xff };
- hw_regs_t hw[2];
-
printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n",
basePort, regOn);
@@ -211,26 +217,10 @@ static int __init ali14xx_probe(void)
return 1;
}
- memset(&hw, 0, sizeof(hw));
-
- ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
- hw[0].irq = 14;
-
- ide_std_init_ports(&hw[1], 0x170, 0x376);
- hw[1].irq = 15;
-
- ide_init_port_hw(&ide_hwifs[0], &hw[0]);
- ide_init_port_hw(&ide_hwifs[1], &hw[1]);
-
- ide_hwifs[0].set_pio_mode = &ali14xx_set_pio_mode;
- ide_hwifs[1].set_pio_mode = &ali14xx_set_pio_mode;
-
- ide_device_add(idx, &ali14xx_port_info);
-
- return 0;
+ return ide_legacy_device_add(&ali14xx_port_info, 0);
}
-int probe_ali14xx = 0;
+static int probe_ali14xx;
module_param_named(probe, probe_ali14xx, bool, 0);
MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
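The ali14xx conversion above shows the pattern this series applies to every legacy driver: instead of poking function pointers into each hwif (hwif->set_pio_mode = &...), the driver registers one shared, read-only ops table through its port info. A small sketch of that shape, with hypothetical names throughout:

#include <stdio.h>

struct drive;				/* opaque for this sketch */

struct port_ops {
	void (*set_pio_mode)(struct drive *, unsigned char pio);
	void (*selectproc)(struct drive *);
};

static void ali_set_pio_mode(struct drive *d, unsigned char pio)
{
	(void)d;
	printf("program PIO%d timings\n", pio);
}

/* one const table shared by every port this driver claims */
static const struct port_ops ali_port_ops = {
	.set_pio_mode	= ali_set_pio_mode,
	/* .selectproc left NULL: this chip does not need it */
};

int main(void)
{
	const struct port_ops *ops = &ali_port_ops;

	if (ops->set_pio_mode)		/* callers must tolerate NULL hooks */
		ops->set_pio_mode(NULL, 4);
	return 0;
}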
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index fdd3791e465..5c730e4dd73 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -102,7 +102,7 @@ static int buddha_ack_intr(ide_hwif_t *hwif)
{
unsigned char ch;
- ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+ ch = z_readb(hwif->io_ports.irq_addr);
if (!(ch & 0x80))
return 0;
return 1;
@@ -112,9 +112,9 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
{
unsigned char ch;
- ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+ ch = z_readb(hwif->io_ports.irq_addr);
/* X-Surf needs a 0 written to IRQ register to ensure ISA bit A11 stays at 0 */
- z_writeb(0, hwif->io_ports[IDE_IRQ_OFFSET]);
+ z_writeb(0, hwif->io_ports.irq_addr);
if (!(ch & 0x80))
return 0;
return 1;
@@ -128,13 +128,13 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
memset(hw, 0, sizeof(*hw));
- hw->io_ports[IDE_DATA_OFFSET] = base;
+ hw->io_ports.data_addr = base;
for (i = 1; i < 8; i++)
- hw->io_ports[i] = base + 2 + i * 4;
+ hw->io_ports_array[i] = base + 2 + i * 4;
- hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
- hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+ hw->io_ports.ctl_addr = ctl;
+ hw->io_ports.irq_addr = irq_port;
hw->irq = IRQ_AMIGA_PORTS;
hw->ack_intr = ack_intr;
@@ -221,15 +221,13 @@ fail_base2:
buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr);
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif) {
u8 index = hwif->index;
ide_init_port_data(hwif, index);
ide_init_port_hw(hwif, &hw);
- hwif->mmio = 1;
-
idx[i] = index;
}
}
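The buddha hunks above preserve two details worth seeing in isolation: the Zorro register layout (data at base, the remaining taskfile registers at base + 2 + i * 4) and the ack_intr test on bit 7 of the board's IRQ register. A compact sketch, with the base address purely illustrative:

#include <assert.h>

/* data register at base, the other taskfile registers on a 4-byte
 * stride starting at base + 6 (base + 2 + i * 4 for i = 1..7) */
static void setup_ports(unsigned long *ports, unsigned long base)
{
	int i;

	ports[0] = base;
	for (i = 1; i < 8; i++)
		ports[i] = base + 2 + i * 4;
}

/* an IRQ is pending iff bit 7 of the board's IRQ register is set */
static int ack_intr(unsigned char irq_reg)
{
	return (irq_reg & 0x80) != 0;
}

int main(void)
{
	unsigned long ports[8];

	setup_ports(ports, 0xdd0000);
	assert(ports[1] == 0xdd0000 + 6);
	assert(ack_intr(0x80) && !ack_intr(0x7f));
	return 0;
}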
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
index 5f69cd2ea6f..af791a02a12 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/legacy/dtc2278.c
@@ -16,6 +16,8 @@
#include <asm/io.h>
+#define DRV_NAME "dtc2278"
+
/*
* Changing this #undef to #define may solve start up problems in some systems.
*/
@@ -86,30 +88,26 @@ static void dtc2278_set_pio_mode(ide_drive_t *drive, const u8 pio)
}
}
+static const struct ide_port_ops dtc2278_port_ops = {
+ .set_pio_mode = dtc2278_set_pio_mode,
+};
+
static const struct ide_port_info dtc2278_port_info __initdata = {
+ .name = DRV_NAME,
.chipset = ide_dtc2278,
+ .port_ops = &dtc2278_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE |
IDE_HFLAG_NO_UNMASK_IRQS |
IDE_HFLAG_IO_32BIT |
/* disallow ->io_32bit changes */
IDE_HFLAG_NO_IO_32BIT |
- IDE_HFLAG_NO_DMA |
- IDE_HFLAG_NO_AUTOTUNE,
+ IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
};
static int __init dtc2278_probe(void)
{
unsigned long flags;
- ide_hwif_t *hwif, *mate;
- static u8 idx[4] = { 0, 1, 0xff, 0xff };
- hw_regs_t hw[2];
-
- hwif = &ide_hwifs[0];
- mate = &ide_hwifs[1];
-
- if (hwif->chipset != ide_unknown || mate->chipset != ide_unknown)
- return 1;
local_irq_save(flags);
/*
@@ -129,25 +127,10 @@ static int __init dtc2278_probe(void)
#endif
local_irq_restore(flags);
- memset(&hw, 0, sizeof(hw));
-
- ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
- hw[0].irq = 14;
-
- ide_std_init_ports(&hw[1], 0x170, 0x376);
- hw[1].irq = 15;
-
- ide_init_port_hw(hwif, &hw[0]);
- ide_init_port_hw(mate, &hw[1]);
-
- hwif->set_pio_mode = &dtc2278_set_pio_mode;
-
- ide_device_add(idx, &dtc2278_port_info);
-
- return 0;
+ return ide_legacy_device_add(&dtc2278_port_info, 0);
}
-int probe_dtc2278 = 0;
+static int probe_dtc2278;
module_param_named(probe, probe_dtc2278, bool, 0);
MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index e950afa5939..56cdaa0eeea 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -22,6 +22,7 @@
#include <asm/atariints.h>
#include <asm/atari_stdma.h>
+#define DRV_NAME "falconide"
/*
* Base of the IDE interface
@@ -49,12 +50,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
memset(hw, 0, sizeof(*hw));
- hw->io_ports[IDE_DATA_OFFSET] = ATA_HD_BASE;
+ hw->io_ports.data_addr = ATA_HD_BASE;
for (i = 1; i < 8; i++)
- hw->io_ports[i] = ATA_HD_BASE + 1 + i * 4;
+ hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4;
- hw->io_ports[IDE_CONTROL_OFFSET] = ATA_HD_BASE + ATA_HD_CONTROL;
+ hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL;
hw->irq = IRQ_MFP_IDE;
hw->ack_intr = NULL;
@@ -74,9 +75,14 @@ static int __init falconide_init(void)
printk(KERN_INFO "ide: Falcon IDE controller\n");
+ if (!request_mem_region(ATA_HD_BASE, 0x40, DRV_NAME)) {
+ printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
+ return -EBUSY;
+ }
+
falconide_setup_ports(&hw);
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif) {
u8 index = hwif->index;
u8 idx[4] = { index, 0xff, 0xff, 0xff };
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index e3b4638cc88..a9c2593a898 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -63,6 +63,8 @@
#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000)
int ide_doubler = 0; /* support IDE doublers? */
+module_param_named(doubler, ide_doubler, bool, 0);
+MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
@@ -74,7 +76,7 @@ static int gayle_ack_intr_a4000(ide_hwif_t *hwif)
{
unsigned char ch;
- ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+ ch = z_readb(hwif->io_ports.irq_addr);
if (!(ch & GAYLE_IRQ_IDE))
return 0;
return 1;
@@ -84,11 +86,11 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
{
unsigned char ch;
- ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+ ch = z_readb(hwif->io_ports.irq_addr);
if (!(ch & GAYLE_IRQ_IDE))
return 0;
- (void)z_readb(hwif->io_ports[IDE_STATUS_OFFSET]);
- z_writeb(0x7c, hwif->io_ports[IDE_IRQ_OFFSET]);
+ (void)z_readb(hwif->io_ports.status_addr);
+ z_writeb(0x7c, hwif->io_ports.irq_addr);
return 1;
}
@@ -100,13 +102,13 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
memset(hw, 0, sizeof(*hw));
- hw->io_ports[IDE_DATA_OFFSET] = base;
+ hw->io_ports.data_addr = base;
for (i = 1; i < 8; i++)
- hw->io_ports[i] = base + 2 + i * 4;
+ hw->io_ports_array[i] = base + 2 + i * 4;
- hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
- hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+ hw->io_ports.ctl_addr = ctl;
+ hw->io_ports.irq_addr = irq_port;
hw->irq = IRQ_AMIGA_PORTS;
hw->ack_intr = ack_intr;
@@ -175,15 +177,13 @@ found:
gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr);
- hwif = ide_find_port(base);
+ hwif = ide_find_port();
if (hwif) {
u8 index = hwif->index;
ide_init_port_data(hwif, index);
ide_init_port_hw(hwif, &hw);
- hwif->mmio = 1;
-
idx[i] = index;
} else
release_mem_region(res_start, res_n);
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 0b0d8673192..abdedf56643 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -122,12 +122,12 @@ static int hd_error;
* This struct defines the HD's and their types.
*/
struct hd_i_struct {
- unsigned int head,sect,cyl,wpcom,lzone,ctl;
+ unsigned int head, sect, cyl, wpcom, lzone, ctl;
int unit;
int recalibrate;
int special_op;
};
-
+
#ifdef HD_TYPE
static struct hd_i_struct hd_info[] = { HD_TYPE };
static int NR_HD = ARRAY_SIZE(hd_info);
@@ -168,7 +168,7 @@ unsigned long read_timer(void)
spin_lock_irqsave(&i8253_lock, flags);
t = jiffies * 11932;
- outb_p(0, 0x43);
+ outb_p(0, 0x43);
i = inb_p(0x40);
i |= inb(0x40) << 8;
spin_unlock_irqrestore(&i8253_lock, flags);
@@ -183,7 +183,7 @@ static void __init hd_setup(char *str, int *ints)
if (ints[0] != 3)
return;
if (hd_info[0].head != 0)
- hdind=1;
+ hdind = 1;
hd_info[hdind].head = ints[2];
hd_info[hdind].sect = ints[3];
hd_info[hdind].cyl = ints[1];
@@ -193,7 +193,7 @@ static void __init hd_setup(char *str, int *ints)
NR_HD = hdind+1;
}
-static void dump_status (const char *msg, unsigned int stat)
+static void dump_status(const char *msg, unsigned int stat)
{
char *name = "hd?";
if (CURRENT)
@@ -291,7 +291,6 @@ static int controller_ready(unsigned int drive, unsigned int head)
return 0;
}
-
static void hd_out(struct hd_i_struct *disk,
unsigned int nsect,
unsigned int sect,
@@ -313,15 +312,15 @@ static void hd_out(struct hd_i_struct *disk,
return;
}
SET_HANDLER(intr_addr);
- outb_p(disk->ctl,HD_CMD);
- port=HD_DATA;
- outb_p(disk->wpcom>>2,++port);
- outb_p(nsect,++port);
- outb_p(sect,++port);
- outb_p(cyl,++port);
- outb_p(cyl>>8,++port);
- outb_p(0xA0|(disk->unit<<4)|head,++port);
- outb_p(cmd,++port);
+ outb_p(disk->ctl, HD_CMD);
+ port = HD_DATA;
+ outb_p(disk->wpcom >> 2, ++port);
+ outb_p(nsect, ++port);
+ outb_p(sect, ++port);
+ outb_p(cyl, ++port);
+ outb_p(cyl >> 8, ++port);
+ outb_p(0xA0 | (disk->unit << 4) | head, ++port);
+ outb_p(cmd, ++port);
}
static void hd_request (void);
@@ -344,14 +343,14 @@ static void reset_controller(void)
{
int i;
- outb_p(4,HD_CMD);
- for(i = 0; i < 1000; i++) barrier();
- outb_p(hd_info[0].ctl & 0x0f,HD_CMD);
- for(i = 0; i < 1000; i++) barrier();
+ outb_p(4, HD_CMD);
+ for (i = 0; i < 1000; i++) barrier();
+ outb_p(hd_info[0].ctl & 0x0f, HD_CMD);
+ for (i = 0; i < 1000; i++) barrier();
if (drive_busy())
printk("hd: controller still busy\n");
else if ((hd_error = inb(HD_ERROR)) != 1)
- printk("hd: controller reset failed: %02x\n",hd_error);
+ printk("hd: controller reset failed: %02x\n", hd_error);
}
static void reset_hd(void)
@@ -371,8 +370,8 @@ repeat:
if (++i < NR_HD) {
struct hd_i_struct *disk = &hd_info[i];
disk->special_op = disk->recalibrate = 1;
- hd_out(disk,disk->sect,disk->sect,disk->head-1,
- disk->cyl,WIN_SPECIFY,&reset_hd);
+ hd_out(disk, disk->sect, disk->sect, disk->head-1,
+ disk->cyl, WIN_SPECIFY, &reset_hd);
if (reset)
goto repeat;
} else
@@ -393,7 +392,7 @@ static void unexpected_hd_interrupt(void)
unsigned int stat = inb_p(HD_STATUS);
if (stat & (BUSY_STAT|DRQ_STAT|ECC_STAT|ERR_STAT)) {
- dump_status ("unexpected interrupt", stat);
+ dump_status("unexpected interrupt", stat);
SET_TIMER;
}
}
@@ -453,7 +452,7 @@ static void read_intr(void)
return;
ok_to_read:
req = CURRENT;
- insw(HD_DATA,req->buffer,256);
+ insw(HD_DATA, req->buffer, 256);
req->sector++;
req->buffer += 512;
req->errors = 0;
@@ -507,7 +506,7 @@ ok_to_write:
end_request(req, 1);
if (i > 0) {
SET_HANDLER(&write_intr);
- outsw(HD_DATA,req->buffer,256);
+ outsw(HD_DATA, req->buffer, 256);
local_irq_enable();
} else {
#if (HD_DELAY > 0)
@@ -560,11 +559,11 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
{
if (disk->recalibrate) {
disk->recalibrate = 0;
- hd_out(disk,disk->sect,0,0,0,WIN_RESTORE,&recal_intr);
+ hd_out(disk, disk->sect, 0, 0, 0, WIN_RESTORE, &recal_intr);
return reset;
}
if (disk->head > 16) {
- printk ("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
+ printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
end_request(req, 0);
}
disk->special_op = 0;
@@ -633,19 +632,21 @@ repeat:
if (blk_fs_request(req)) {
switch (rq_data_dir(req)) {
case READ:
- hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr);
+ hd_out(disk, nsect, sec, head, cyl, WIN_READ,
+ &read_intr);
if (reset)
goto repeat;
break;
case WRITE:
- hd_out(disk,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
+ hd_out(disk, nsect, sec, head, cyl, WIN_WRITE,
+ &write_intr);
if (reset)
goto repeat;
if (wait_DRQ()) {
bad_rw_intr();
goto repeat;
}
- outsw(HD_DATA,req->buffer,256);
+ outsw(HD_DATA, req->buffer, 256);
break;
default:
printk("unknown hd-command\n");
@@ -655,7 +656,7 @@ repeat:
}
}
-static void do_hd_request (struct request_queue * q)
+static void do_hd_request(struct request_queue *q)
{
disable_irq(HD_IRQ);
hd_request();
@@ -708,12 +709,12 @@ static int __init hd_init(void)
{
int drive;
- if (register_blkdev(MAJOR_NR,"hd"))
+ if (register_blkdev(MAJOR_NR, "hd"))
return -1;
hd_queue = blk_init_queue(do_hd_request, &hd_lock);
if (!hd_queue) {
- unregister_blkdev(MAJOR_NR,"hd");
+ unregister_blkdev(MAJOR_NR, "hd");
return -ENOMEM;
}
@@ -742,7 +743,7 @@ static int __init hd_init(void)
goto out;
}
- for (drive=0 ; drive < NR_HD ; drive++) {
+ for (drive = 0 ; drive < NR_HD ; drive++) {
struct gendisk *disk = alloc_disk(64);
struct hd_i_struct *p = &hd_info[drive];
if (!disk)
@@ -756,7 +757,7 @@ static int __init hd_init(void)
disk->queue = hd_queue;
p->unit = drive;
hd_gendisk[drive] = disk;
- printk ("%s: %luMB, CHS=%d/%d/%d\n",
+ printk("%s: %luMB, CHS=%d/%d/%d\n",
disk->disk_name, (unsigned long)get_capacity(disk)/2048,
p->cyl, p->head, p->sect);
}
@@ -776,7 +777,7 @@ static int __init hd_init(void)
}
/* Let them fly */
- for(drive=0; drive < NR_HD; drive++)
+ for (drive = 0; drive < NR_HD; drive++)
add_disk(hd_gendisk[drive]);
return 0;
@@ -791,7 +792,7 @@ out1:
NR_HD = 0;
out:
del_timer(&device_timer);
- unregister_blkdev(MAJOR_NR,"hd");
+ unregister_blkdev(MAJOR_NR, "hd");
blk_cleanup_queue(hd_queue);
return -1;
Enomem:
@@ -800,7 +801,8 @@ Enomem:
goto out;
}
-static int __init parse_hd_setup (char *line) {
+static int __init parse_hd_setup(char *line)
+{
int ints[6];
(void) get_options(line, ARRAY_SIZE(ints), ints);
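The hd.c changes above are style-only, but the function they orbit, hd_out(), is worth a sketch: it programs a CHS command one register at a time, walking up from HD_DATA after writing the control byte. Here outb_p() is a printing stand-in and the ctl/wpcom values are illustrative:

#include <stdio.h>

#define HD_DATA	0x1f0
#define HD_CMD	0x3f6

/* printing stand-in; the driver's outb_p() touches real I/O ports */
static void outb_p(unsigned char val, unsigned int port)
{
	printf("out %#04x -> %#x\n", val, port);
}

static void hd_out(unsigned int unit, unsigned int nsect, unsigned int sect,
		   unsigned int head, unsigned int cyl, unsigned char cmd)
{
	unsigned int port = HD_DATA;

	outb_p(0x08, HD_CMD);		/* control byte first */
	outb_p(0xff >> 2, ++port);	/* write precompensation */
	outb_p(nsect, ++port);
	outb_p(sect, ++port);
	outb_p(cyl, ++port);		/* cylinder low */
	outb_p(cyl >> 8, ++port);	/* cylinder high */
	outb_p(0xA0 | (unit << 4) | head, ++port);
	outb_p(cmd, ++port);		/* command last, e.g. 0x20 = read */
}

int main(void)
{
	hd_out(0, 1, 1, 0, 0, 0x20);
	return 0;
}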
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index 88fe9070c9c..4fe516df9f7 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -35,6 +35,7 @@
* Try: http://www.maf.iki.fi/~maf/ht6560b/
*/
+#define DRV_NAME "ht6560b"
#define HT6560B_VERSION "v0.08"
#include <linux/module.h>
@@ -156,8 +157,8 @@ static void ht6560b_selectproc (ide_drive_t *drive)
/*
* Set timing for this drive:
*/
- outb(timing, hwif->io_ports[IDE_SELECT_OFFSET]);
- (void)inb(hwif->io_ports[IDE_STATUS_OFFSET]);
+ outb(timing, hwif->io_ports.device_addr);
+ (void)inb(hwif->io_ports.status_addr);
#ifdef DEBUG
printk("ht6560b: %s: select=%#x timing=%#x\n",
drive->name, select, timing);
@@ -211,8 +212,8 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio)
{
int active_time, recovery_time;
int active_cycles, recovery_cycles;
- int bus_speed = system_bus_clock();
-
+ int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
+
if (pio) {
unsigned int cycle_time;
@@ -322,66 +323,44 @@ static void __init ht6560b_port_init_devs(ide_hwif_t *hwif)
hwif->drives[1].drive_data = t;
}
-int probe_ht6560b = 0;
+static int probe_ht6560b;
module_param_named(probe, probe_ht6560b, bool, 0);
MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
+static const struct ide_port_ops ht6560b_port_ops = {
+ .port_init_devs = ht6560b_port_init_devs,
+ .set_pio_mode = ht6560b_set_pio_mode,
+ .selectproc = ht6560b_selectproc,
+};
+
static const struct ide_port_info ht6560b_port_info __initdata = {
+ .name = DRV_NAME,
.chipset = ide_ht6560b,
+ .port_ops = &ht6560b_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */
IDE_HFLAG_NO_DMA |
- IDE_HFLAG_NO_AUTOTUNE |
IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO4,
};
static int __init ht6560b_init(void)
{
- ide_hwif_t *hwif, *mate;
- static u8 idx[4] = { 0, 1, 0xff, 0xff };
- hw_regs_t hw[2];
-
if (probe_ht6560b == 0)
return -ENODEV;
- hwif = &ide_hwifs[0];
- mate = &ide_hwifs[1];
-
- if (!request_region(HT_CONFIG_PORT, 1, hwif->name)) {
+ if (!request_region(HT_CONFIG_PORT, 1, DRV_NAME)) {
printk(KERN_NOTICE "%s: HT_CONFIG_PORT not found\n",
- __FUNCTION__);
+ __func__);
return -ENODEV;
}
if (!try_to_init_ht6560b()) {
- printk(KERN_NOTICE "%s: HBA not found\n", __FUNCTION__);
+ printk(KERN_NOTICE "%s: HBA not found\n", __func__);
goto release_region;
}
- memset(&hw, 0, sizeof(hw));
-
- ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
- hw[0].irq = 14;
-
- ide_std_init_ports(&hw[1], 0x170, 0x376);
- hw[1].irq = 15;
-
- ide_init_port_hw(hwif, &hw[0]);
- ide_init_port_hw(mate, &hw[1]);
-
- hwif->selectproc = &ht6560b_selectproc;
- hwif->set_pio_mode = &ht6560b_set_pio_mode;
-
- mate->selectproc = &ht6560b_selectproc;
- mate->set_pio_mode = &ht6560b_set_pio_mode;
-
- hwif->port_init_devs = ht6560b_port_init_devs;
- mate->port_init_devs = ht6560b_port_init_devs;
-
- ide_device_add(idx, &ht6560b_port_info);
-
- return 0;
+ return ide_legacy_device_add(&ht6560b_port_info, 0);
release_region:
release_region(HT_CONFIG_PORT, 1);
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c
index ecd7f355355..ecae916a338 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/legacy/ide-4drives.c
@@ -4,7 +4,9 @@
#include <linux/module.h>
#include <linux/ide.h>
-int probe_4drives = 0;
+#define DRV_NAME "ide-4drives"
+
+static int probe_4drives;
module_param_named(probe, probe_4drives, bool, 0);
MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
@@ -12,31 +14,51 @@ MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
static int __init ide_4drives_init(void)
{
ide_hwif_t *hwif, *mate;
- u8 idx[4] = { 0, 1, 0xff, 0xff };
+ unsigned long base = 0x1f0, ctl = 0x3f6;
+ u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
hw_regs_t hw;
if (probe_4drives == 0)
return -ENODEV;
- hwif = &ide_hwifs[0];
- mate = &ide_hwifs[1];
+ if (!request_region(base, 8, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+ DRV_NAME, base, base + 7);
+ return -EBUSY;
+ }
+
+ if (!request_region(ctl, 1, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+ DRV_NAME, ctl);
+ release_region(base, 8);
+ return -EBUSY;
+ }
memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, 0x1f0, 0x3f6);
+ ide_std_init_ports(&hw, base, ctl);
hw.irq = 14;
hw.chipset = ide_4drives;
- ide_init_port_hw(hwif, &hw);
- ide_init_port_hw(mate, &hw);
-
- mate->drives[0].select.all ^= 0x20;
- mate->drives[1].select.all ^= 0x20;
-
- hwif->mate = mate;
- mate->mate = hwif;
-
- hwif->serialized = mate->serialized = 1;
+ hwif = ide_find_port();
+ if (hwif) {
+ ide_init_port_hw(hwif, &hw);
+ idx[0] = hwif->index;
+ }
+
+ mate = ide_find_port();
+ if (mate) {
+ ide_init_port_hw(mate, &hw);
+ mate->drives[0].select.all ^= 0x20;
+ mate->drives[1].select.all ^= 0x20;
+ idx[1] = mate->index;
+
+ if (hwif) {
+ hwif->mate = mate;
+ mate->mate = hwif;
+ hwif->serialized = mate->serialized = 1;
+ }
+ }
ide_device_add(idx, NULL);
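ide_4drives_init() above now claims both of its I/O ranges up front and releases the first if the second claim fails. That claim-then-roll-back shape, sketched with toy stand-ins for request_region()/release_region():

#include <stdio.h>

/* toy stand-ins; the real calls reserve kernel I/O port ranges */
static int request_region(unsigned long start, int n, const char *name)
{
	printf("%s: claim %#lx-%#lx\n", name, start, start + n - 1);
	return 1;			/* pretend the range was free */
}

static void release_region(unsigned long start, int n)
{
	printf("release %#lx (+%d)\n", start, n);
}

static int claim_ports(unsigned long base, unsigned long ctl)
{
	if (!request_region(base, 8, "ide-4drives"))
		return -1;

	if (!request_region(ctl, 1, "ide-4drives")) {
		release_region(base, 8);	/* undo the first claim */
		return -1;
	}
	return 0;
}

int main(void)
{
	return claim_ports(0x1f0, 0x3f6) ? 1 : 0;
}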
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 9a23b94f293..aa2ea3deac8 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -51,6 +51,8 @@
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
+#define DRV_NAME "ide-cs"
+
/*====================================================================*/
/* Module parameters */
@@ -72,16 +74,11 @@ static char *version =
/*====================================================================*/
-static const char ide_major[] = {
- IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR,
- IDE4_MAJOR, IDE5_MAJOR
-};
-
typedef struct ide_info_t {
struct pcmcia_device *p_dev;
+ ide_hwif_t *hwif;
int ndev;
dev_node_t node;
- int hd;
} ide_info_t;
static void ide_release(struct pcmcia_device *);
@@ -136,45 +133,71 @@ static int ide_probe(struct pcmcia_device *link)
static void ide_detach(struct pcmcia_device *link)
{
+ ide_info_t *info = link->priv;
+ ide_hwif_t *hwif = info->hwif;
+
DEBUG(0, "ide_detach(0x%p)\n", link);
ide_release(link);
- kfree(link->priv);
+ release_region(hwif->io_ports.ctl_addr, 1);
+ release_region(hwif->io_ports.data_addr, 8);
+
+ kfree(info);
} /* ide_detach */
-static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle)
+static const struct ide_port_ops idecs_port_ops = {
+ .quirkproc = ide_undecoded_slave,
+};
+
+static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
+ unsigned long irq, struct pcmcia_device *handle)
{
ide_hwif_t *hwif;
hw_regs_t hw;
int i;
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ if (!request_region(io, 8, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+ DRV_NAME, io, io + 7);
+ return NULL;
+ }
+
+ if (!request_region(ctl, 1, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+ DRV_NAME, ctl);
+ release_region(io, 8);
+ return NULL;
+ }
+
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, io, ctl);
hw.irq = irq;
hw.chipset = ide_pci;
hw.dev = &handle->dev;
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif == NULL)
- return -1;
+ goto out_release;
i = hwif->index;
- if (hwif->present)
- ide_unregister(i);
- else
- ide_init_port_data(hwif, i);
-
+ ide_init_port_data(hwif, i);
ide_init_port_hw(hwif, &hw);
- hwif->quirkproc = &ide_undecoded_slave;
+ hwif->port_ops = &idecs_port_ops;
idx[0] = i;
ide_device_add(idx, NULL);
- return hwif->present ? i : -1;
+ if (hwif->present)
+ return hwif;
+
+out_release:
+ release_region(ctl, 1);
+ release_region(io, 8);
+ return NULL;
}
/*======================================================================
@@ -199,8 +222,9 @@ static int ide_config(struct pcmcia_device *link)
cistpl_cftable_entry_t dflt;
} *stk = NULL;
cistpl_cftable_entry_t *cfg;
- int i, pass, last_ret = 0, last_fn = 0, hd, is_kme = 0;
+ int i, pass, last_ret = 0, last_fn = 0, is_kme = 0;
unsigned long io_base, ctl_base;
+ ide_hwif_t *hwif;
DEBUG(0, "ide_config(0x%p)\n", link);
@@ -296,14 +320,15 @@ static int ide_config(struct pcmcia_device *link)
outb(0x81, ctl_base+1);
/* retry registration in case device is still spinning up */
- for (hd = -1, i = 0; i < 10; i++) {
- hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
- if (hd >= 0) break;
+ for (i = 0; i < 10; i++) {
+ hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
+ if (hwif)
+ break;
if (link->io.NumPorts1 == 0x20) {
outb(0x02, ctl_base + 0x10);
- hd = idecs_register(io_base + 0x10, ctl_base + 0x10,
- link->irq.AssignedIRQ, link);
- if (hd >= 0) {
+ hwif = idecs_register(io_base + 0x10, ctl_base + 0x10,
+ link->irq.AssignedIRQ, link);
+ if (hwif) {
io_base += 0x10;
ctl_base += 0x10;
break;
@@ -312,7 +337,7 @@ static int ide_config(struct pcmcia_device *link)
msleep(100);
}
- if (hd < 0) {
+ if (hwif == NULL) {
printk(KERN_NOTICE "ide-cs: ide_register() at 0x%3lx & 0x%3lx"
", irq %u failed\n", io_base, ctl_base,
link->irq.AssignedIRQ);
@@ -320,10 +345,10 @@ static int ide_config(struct pcmcia_device *link)
}
info->ndev = 1;
- sprintf(info->node.dev_name, "hd%c", 'a' + (hd * 2));
- info->node.major = ide_major[hd];
+ sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2);
+ info->node.major = hwif->major;
info->node.minor = 0;
- info->hd = hd;
+ info->hwif = hwif;
link->dev_node = &info->node;
printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
@@ -354,13 +379,14 @@ failed:
void ide_release(struct pcmcia_device *link)
{
ide_info_t *info = link->priv;
+ ide_hwif_t *hwif = info->hwif;
DEBUG(0, "ide_release(0x%p)\n", link);
if (info->ndev) {
/* FIXME: if this fails we need to queue the cleanup somehow
-- need to investigate the required PCMCIA magic */
- ide_unregister(info->hd);
+ ide_unregister(hwif);
}
info->ndev = 0;
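ide_config() above retries idecs_register() up to ten times because a freshly inserted card may still be spinning up. The retry shape in isolation, with a stand-in that succeeds on the third attempt:

#include <stdio.h>
#include <stddef.h>

struct hwif;					/* opaque for this sketch */

/* stand-in: fails until the (pretend) drive has spun up */
static struct hwif *try_register(int attempt)
{
	return attempt < 2 ? NULL : (struct hwif *)1;
}

int main(void)
{
	struct hwif *hwif = NULL;
	int i;

	for (i = 0; i < 10; i++) {
		hwif = try_register(i);
		if (hwif)
			break;
		/* the driver sleeps here: msleep(100) */
	}

	if (hwif)
		printf("registered on attempt %d\n", i + 1);
	else
		printf("gave up after %d attempts\n", i);
	return 0;
}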
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index 361b1bb544b..8279dc7ca4c 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -30,14 +30,14 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
unsigned long port = (unsigned long)base;
int i;
- hw->io_ports[IDE_DATA_OFFSET] = port;
+ hw->io_ports.data_addr = port;
port += (1 << pdata->ioport_shift);
- for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET;
+ for (i = 1; i <= 7;
i++, port += (1 << pdata->ioport_shift))
- hw->io_ports[i] = port;
+ hw->io_ports_array[i] = port;
- hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
+ hw->io_ports.ctl_addr = (unsigned long)ctrl;
hw->irq = irq;
@@ -89,7 +89,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
res_alt->start, res_alt->end - res_alt->start + 1);
}
- hwif = ide_find_port((unsigned long)base);
+ hwif = ide_find_port();
if (!hwif) {
ret = -ENODEV;
goto out;
@@ -101,10 +101,8 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
ide_init_port_hw(hwif, &hw);
- if (mmio) {
- hwif->mmio = 1;
+ if (mmio)
default_hwif_mmiops(hwif);
- }
idx[0] = hwif->index;
@@ -122,7 +120,7 @@ static int __devexit plat_ide_remove(struct platform_device *pdev)
{
ide_hwif_t *hwif = pdev->dev.driver_data;
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
return 0;
}
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index eaf5dbe58bc..1f527bbf8d9 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -72,9 +72,9 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
memset(hw, 0, sizeof(*hw));
for (i = 0; i < 8; i++)
- hw->io_ports[i] = base + i * 4;
+ hw->io_ports_array[i] = base + i * 4;
- hw->io_ports[IDE_CONTROL_OFFSET] = base + IDE_CONTROL;
+ hw->io_ports.ctl_addr = base + IDE_CONTROL;
hw->irq = irq;
hw->ack_intr = ack_intr;
@@ -120,7 +120,7 @@ static int __init macide_init(void)
macide_setup_ports(&hw, base, irq, ack_intr);
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif) {
u8 index = hwif->index;
u8 idx[4] = { index, 0xff, 0xff, 0xff };
@@ -128,8 +128,6 @@ static int __init macide_init(void)
ide_init_port_data(hwif, index);
ide_init_port_hw(hwif, &hw);
- hwif->mmio = 1;
-
ide_device_add(idx, NULL);
}
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 2da28759686..a3573d40b4b 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -80,10 +80,10 @@ void q40_ide_setup_ports ( hw_regs_t *hw,
for (i = 0; i < IDE_NR_PORTS; i++) {
/* BIG FAT WARNING:
assumption: only DATA port is ever used in 16 bit mode */
- if ( i==0 )
- hw->io_ports[i] = Q40_ISA_IO_W(base + offsets[i]);
+ if (i == 0)
+ hw->io_ports_array[i] = Q40_ISA_IO_W(base + offsets[i]);
else
- hw->io_ports[i] = Q40_ISA_IO_B(base + offsets[i]);
+ hw->io_ports_array[i] = Q40_ISA_IO_B(base + offsets[i]);
}
hw->irq = irq;
@@ -137,11 +137,10 @@ static int __init q40ide_init(void)
// m68kide_iops,
q40ide_default_irq(pcide_bases[i]));
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif) {
ide_init_port_data(hwif, hwif->index);
ide_init_port_hw(hwif, &hw);
- hwif->mmio = 1;
idx[i] = hwif->index;
}
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index 7016bdf4fcc..6424af15432 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -11,11 +11,7 @@
*
* QDI QD6500/QD6580 EIDE controller fast support
*
- * Please set local bus speed using kernel parameter idebus
- * for example, "idebus=33" stands for 33Mhz VLbus
* To activate controller support, use "ide0=qd65xx"
- * To enable tuning, use "hda=autotune hdb=autotune"
- * To enable 2nd channel tuning (qd6580 only), use "hdc=autotune hdd=autotune"
*/
/*
@@ -37,6 +33,8 @@
#include <asm/system.h>
#include <asm/io.h>
+#define DRV_NAME "qd65xx"
+
#include "qd65xx.h"
/*
@@ -88,12 +86,12 @@
static int timings[4]={-1,-1,-1,-1}; /* stores current timing for each timer */
/*
- * qd_select:
+ * qd65xx_select:
*
- * This routine is invoked from ide.c to prepare for access to a given drive.
+ * This routine is invoked to prepare for access to a given drive.
*/
-static void qd_select (ide_drive_t *drive)
+static void qd65xx_select(ide_drive_t *drive)
{
u8 index = (( (QD_TIMREG(drive)) & 0x80 ) >> 7) |
(QD_TIMREG(drive) & 0x02);
@@ -112,17 +110,18 @@ static void qd_select (ide_drive_t *drive)
static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time)
{
- u8 active_cycle,recovery_cycle;
+ int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
+ u8 act_cyc, rec_cyc;
- if (system_bus_clock()<=33) {
- active_cycle = 9 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 2, 9);
- recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 0, 15);
+ if (clk <= 33) {
+ act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9);
+ rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15);
} else {
- active_cycle = 8 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 1, 8);
- recovery_cycle = 18 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 3, 18);
+ act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8);
+ rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18);
}
- return((recovery_cycle<<4) | 0x08 | active_cycle);
+ return (rec_cyc << 4) | 0x08 | act_cyc;
}
/*
@@ -133,10 +132,13 @@ static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery
static u8 qd6580_compute_timing (int active_time, int recovery_time)
{
- u8 active_cycle = 17 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 2, 17);
- u8 recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 2, 15);
+ int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
+ u8 act_cyc, rec_cyc;
+
+ act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17);
+ rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15);
- return((recovery_cycle<<4) | active_cycle);
+ return (rec_cyc << 4) | act_cyc;
}
/*
@@ -168,36 +170,15 @@ static int qd_find_disk_type (ide_drive_t *drive,
}
/*
- * qd_timing_ok:
- *
- * check whether timings don't conflict
- */
-
-static int qd_timing_ok (ide_drive_t drives[])
-{
- return (IDE_IMPLY(drives[0].present && drives[1].present,
- IDE_IMPLY(QD_TIMREG(drives) == QD_TIMREG(drives+1),
- QD_TIMING(drives) == QD_TIMING(drives+1))));
- /* if same timing register, must be same timing */
-}
-
-/*
* qd_set_timing:
*
- * records the timing, and enables selectproc as needed
+ * records the timing
*/
static void qd_set_timing (ide_drive_t *drive, u8 timing)
{
- ide_hwif_t *hwif = HWIF(drive);
-
drive->drive_data &= 0xff00;
drive->drive_data |= timing;
- if (qd_timing_ok(hwif->drives)) {
- qd_select(drive); /* selects once */
- hwif->selectproc = NULL;
- } else
- hwif->selectproc = &qd_select;
printk(KERN_DEBUG "%s: %#x\n", drive->name, timing);
}
@@ -225,10 +206,11 @@ static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio)
static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
- int base = HWIF(drive)->select_data;
+ ide_hwif_t *hwif = drive->hwif;
unsigned int cycle_time;
int active_time = 175;
int recovery_time = 415; /* worst case values from the dos driver */
+ u8 base = (hwif->config_data & 0xff00) >> 8;
if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) {
cycle_time = ide_pio_cycle_time(drive, pio);
@@ -299,21 +281,10 @@ static int __init qd_testreg(int port)
return (readreg != QD_TESTVAL);
}
-/*
- * qd_setup:
- *
- * called to setup an ata channel : adjusts attributes & links for tuning
- */
-
-static void __init qd_setup(ide_hwif_t *hwif, int base, int config)
-{
- hwif->select_data = base;
- hwif->config_data = config;
-}
-
static void __init qd6500_port_init_devs(ide_hwif_t *hwif)
{
- u8 base = hwif->select_data, config = QD_CONFIG(hwif);
+ u8 base = (hwif->config_data & 0xff00) >> 8;
+ u8 config = QD_CONFIG(hwif);
hwif->drives[0].drive_data = QD6500_DEF_DATA;
hwif->drives[1].drive_data = QD6500_DEF_DATA;
@@ -322,9 +293,10 @@ static void __init qd6500_port_init_devs(ide_hwif_t *hwif)
static void __init qd6580_port_init_devs(ide_hwif_t *hwif)
{
u16 t1, t2;
- u8 base = hwif->select_data, config = QD_CONFIG(hwif);
+ u8 base = (hwif->config_data & 0xff00) >> 8;
+ u8 config = QD_CONFIG(hwif);
- if (QD_CONTROL(hwif) & QD_CONTR_SEC_DISABLED) {
+ if (hwif->host_flags & IDE_HFLAG_SINGLE) {
t1 = QD6580_DEF_DATA;
t2 = QD6580_DEF_DATA2;
} else
@@ -334,11 +306,23 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif)
hwif->drives[1].drive_data = t2;
}
+static const struct ide_port_ops qd6500_port_ops = {
+ .port_init_devs = qd6500_port_init_devs,
+ .set_pio_mode = qd6500_set_pio_mode,
+ .selectproc = qd65xx_select,
+};
+
+static const struct ide_port_ops qd6580_port_ops = {
+ .port_init_devs = qd6580_port_init_devs,
+ .set_pio_mode = qd6580_set_pio_mode,
+ .selectproc = qd65xx_select,
+};
+
static const struct ide_port_info qd65xx_port_info __initdata = {
+ .name = DRV_NAME,
.chipset = ide_qd65xx,
.host_flags = IDE_HFLAG_IO_32BIT |
- IDE_HFLAG_NO_DMA |
- IDE_HFLAG_NO_AUTOTUNE,
+ IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
};
@@ -351,65 +335,41 @@ static const struct ide_port_info qd65xx_port_info __initdata = {
static int __init qd_probe(int base)
{
- ide_hwif_t *hwif;
- u8 config, unit;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- hw_regs_t hw[2];
+ int rc;
+ u8 config, unit, control;
+ struct ide_port_info d = qd65xx_port_info;
config = inb(QD_CONFIG_PORT);
if (! ((config & QD_CONFIG_BASEPORT) >> 1 == (base == 0xb0)) )
- return 1;
+ return -ENODEV;
unit = ! (config & QD_CONFIG_IDE_BASEPORT);
- memset(&hw, 0, sizeof(hw));
+ if (unit)
+ d.host_flags |= IDE_HFLAG_QD_2ND_PORT;
- ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
- hw[0].irq = 14;
+ switch (config & 0xf0) {
+ case QD_CONFIG_QD6500:
+ if (qd_testreg(base))
+ return -ENODEV; /* bad register */
- ide_std_init_ports(&hw[1], 0x170, 0x376);
- hw[1].irq = 15;
-
- if ((config & 0xf0) == QD_CONFIG_QD6500) {
-
- if (qd_testreg(base)) return 1; /* bad register */
-
- /* qd6500 found */
-
- hwif = &ide_hwifs[unit];
- printk(KERN_NOTICE "%s: qd6500 at %#x\n", hwif->name, base);
- printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n",
- config, QD_ID3);
-
if (config & QD_CONFIG_DISABLED) {
printk(KERN_WARNING "qd6500 is disabled !\n");
- return 1;
+ return -ENODEV;
}
- ide_init_port_hw(hwif, &hw[unit]);
-
- qd_setup(hwif, base, config);
-
- hwif->port_init_devs = qd6500_port_init_devs;
- hwif->set_pio_mode = &qd6500_set_pio_mode;
-
- idx[unit] = unit;
-
- ide_device_add(idx, &qd65xx_port_info);
-
- return 1;
- }
-
- if (((config & 0xf0) == QD_CONFIG_QD6580_A) ||
- ((config & 0xf0) == QD_CONFIG_QD6580_B)) {
-
- u8 control;
-
- if (qd_testreg(base) || qd_testreg(base+0x02)) return 1;
- /* bad registers */
+ printk(KERN_NOTICE "qd6500 at %#x\n", base);
+ printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n",
+ config, QD_ID3);
- /* qd6580 found */
+ d.port_ops = &qd6500_port_ops;
+ d.host_flags |= IDE_HFLAG_SINGLE;
+ break;
+ case QD_CONFIG_QD6580_A:
+ case QD_CONFIG_QD6580_B:
+ if (qd_testreg(base) || qd_testreg(base + 0x02))
+ return -ENODEV; /* bad registers */
control = inb(QD_CONTROL_PORT);
@@ -419,74 +379,44 @@ static int __init qd_probe(int base)
outb(QD_DEF_CONTR, QD_CONTROL_PORT);
- if (control & QD_CONTR_SEC_DISABLED) {
- /* secondary disabled */
-
- hwif = &ide_hwifs[unit];
- printk(KERN_INFO "%s: qd6580: single IDE board\n",
- hwif->name);
-
- ide_init_port_hw(hwif, &hw[unit]);
-
- qd_setup(hwif, base, config | (control << 8));
-
- hwif->port_init_devs = qd6580_port_init_devs;
- hwif->set_pio_mode = &qd6580_set_pio_mode;
-
- idx[unit] = unit;
+ d.port_ops = &qd6580_port_ops;
+ if (control & QD_CONTR_SEC_DISABLED)
+ d.host_flags |= IDE_HFLAG_SINGLE;
- ide_device_add(idx, &qd65xx_port_info);
-
- return 1;
- } else {
- ide_hwif_t *mate;
-
- hwif = &ide_hwifs[0];
- mate = &ide_hwifs[1];
- /* secondary enabled */
- printk(KERN_INFO "%s&%s: qd6580: dual IDE board\n",
- hwif->name, mate->name);
-
- ide_init_port_hw(hwif, &hw[0]);
- ide_init_port_hw(mate, &hw[1]);
-
- qd_setup(hwif, base, config | (control << 8));
-
- hwif->port_init_devs = qd6580_port_init_devs;
- hwif->set_pio_mode = &qd6580_set_pio_mode;
-
- qd_setup(mate, base, config | (control << 8));
-
- mate->port_init_devs = qd6580_port_init_devs;
- mate->set_pio_mode = &qd6580_set_pio_mode;
+ printk(KERN_INFO "qd6580: %s IDE board\n",
+ (control & QD_CONTR_SEC_DISABLED) ? "single" : "dual");
+ break;
+ default:
+ return -ENODEV;
+ }
- idx[0] = 0;
- idx[1] = 1;
+ rc = ide_legacy_device_add(&d, (base << 8) | config);
- ide_device_add(idx, &qd65xx_port_info);
+ if (d.host_flags & IDE_HFLAG_SINGLE)
+ return (rc == 0) ? 1 : rc;
- return 0; /* no other qd65xx possible */
- }
- }
- /* no qd65xx found */
- return 1;
+ return rc;
}
-int probe_qd65xx = 0;
+static int probe_qd65xx;
module_param_named(probe, probe_qd65xx, bool, 0);
MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
static int __init qd65xx_init(void)
{
+ int rc1, rc2 = -ENODEV;
+
if (probe_qd65xx == 0)
return -ENODEV;
- if (qd_probe(0x30))
- qd_probe(0xb0);
- if (ide_hwifs[0].chipset != ide_qd65xx &&
- ide_hwifs[1].chipset != ide_qd65xx)
+ rc1 = qd_probe(0x30);
+ if (rc1)
+ rc2 = qd_probe(0xb0);
+
+ if (rc1 < 0 && rc2 < 0)
return -ENODEV;
+
return 0;
}
diff --git a/drivers/ide/legacy/qd65xx.h b/drivers/ide/legacy/qd65xx.h
index 28dd50a15d5..c83dea85e62 100644
--- a/drivers/ide/legacy/qd65xx.h
+++ b/drivers/ide/legacy/qd65xx.h
@@ -30,7 +30,6 @@
#define QD_ID3 ((config & QD_CONFIG_ID3)!=0)
#define QD_CONFIG(hwif) ((hwif)->config_data & 0x00ff)
-#define QD_CONTROL(hwif) (((hwif)->config_data & 0xff00) >> 8)
#define QD_TIMING(drive) (byte)(((drive)->drive_data) & 0x00ff)
#define QD_TIMREG(drive) (byte)((((drive)->drive_data) & 0xff00) >> 8)
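
With qd_setup() and QD_CONTROL() gone, the probe packs everything the hooks need into config_data: the base port in the high byte (via the (base << 8) | config argument to ide_legacy_device_add() above) and the config byte in the low byte, matching QD_CONFIG(). A quick self-contained check of that packing, using made-up example values:

#include <assert.h>

int main(void)
{
	unsigned char base = 0xb0, config = 0x45;	/* example values */
	unsigned short config_data = (unsigned short)((base << 8) | config);

	assert(((config_data & 0xff00) >> 8) == base);	/* QD base port */
	assert((config_data & 0x00ff) == config);	/* QD_CONFIG()  */
	return 0;
}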
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
index bc1944811b9..b54a14a5775 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/legacy/umc8672.c
@@ -19,7 +19,7 @@
*/
/*
- * VLB Controller Support from
+ * VLB Controller Support from
* Wolfram Podien
* Rohoefe 3
* D28832 Achim
@@ -32,7 +32,7 @@
* #define UMC_DRIVE0 11
* in the beginning of the driver, which sets the speed of drive 0 to 11 (there
* are some lines present). 0 - 11 are allowed speed values. These values are
- * the results from the DOS speed test program supplied from UMC. 11 is the
+ * the results from the DOS speed test program supplied from UMC. 11 is the
* highest speed (about PIO mode 3)
*/
#define REALLY_SLOW_IO /* some systems can safely undef this */
@@ -51,6 +51,8 @@
#include <asm/io.h>
+#define DRV_NAME "umc8672"
+
/*
* Default speeds. These can be changed with "auto-tune" and/or hdparm.
*/
@@ -60,115 +62,103 @@
#define UMC_DRIVE3 1 /* In case of crash reduce speed */
static u8 current_speeds[4] = {UMC_DRIVE0, UMC_DRIVE1, UMC_DRIVE2, UMC_DRIVE3};
-static const u8 pio_to_umc [5] = {0,3,7,10,11}; /* rough guesses */
+static const u8 pio_to_umc [5] = {0, 3, 7, 10, 11}; /* rough guesses */
/* 0 1 2 3 4 5 6 7 8 9 10 11 */
static const u8 speedtab [3][12] = {
- {0xf, 0xb, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 },
- {0x3, 0x2, 0x2, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 },
- {0xff,0xcb,0xc0,0x58,0x36,0x33,0x23,0x22,0x21,0x11,0x10,0x0}};
+ {0x0f, 0x0b, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
+ {0x03, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1},
+ {0xff, 0xcb, 0xc0, 0x58, 0x36, 0x33, 0x23, 0x22, 0x21, 0x11, 0x10, 0x0}
+};
-static void out_umc (char port,char wert)
+static void out_umc(char port, char wert)
{
- outb_p(port,0x108);
- outb_p(wert,0x109);
+ outb_p(port, 0x108);
+ outb_p(wert, 0x109);
}
-static inline u8 in_umc (char port)
+static inline u8 in_umc(char port)
{
- outb_p(port,0x108);
+ outb_p(port, 0x108);
return inb_p(0x109);
}
-static void umc_set_speeds (u8 speeds[])
+static void umc_set_speeds(u8 speeds[])
{
int i, tmp;
- outb_p(0x5A,0x108); /* enable umc */
+ outb_p(0x5A, 0x108); /* enable umc */
- out_umc (0xd7,(speedtab[0][speeds[2]] | (speedtab[0][speeds[3]]<<4)));
- out_umc (0xd6,(speedtab[0][speeds[0]] | (speedtab[0][speeds[1]]<<4)));
+ out_umc(0xd7, (speedtab[0][speeds[2]] | (speedtab[0][speeds[3]]<<4)));
+ out_umc(0xd6, (speedtab[0][speeds[0]] | (speedtab[0][speeds[1]]<<4)));
tmp = 0;
- for (i = 3; i >= 0; i--) {
+ for (i = 3; i >= 0; i--)
tmp = (tmp << 2) | speedtab[1][speeds[i]];
+ out_umc(0xdc, tmp);
+ for (i = 0; i < 4; i++) {
+ out_umc(0xd0 + i, speedtab[2][speeds[i]]);
+ out_umc(0xd8 + i, speedtab[2][speeds[i]]);
}
- out_umc (0xdc,tmp);
- for (i = 0;i < 4; i++) {
- out_umc (0xd0+i,speedtab[2][speeds[i]]);
- out_umc (0xd8+i,speedtab[2][speeds[i]]);
- }
- outb_p(0xa5,0x108); /* disable umc */
+ outb_p(0xa5, 0x108); /* disable umc */
- printk ("umc8672: drive speeds [0 to 11]: %d %d %d %d\n",
+ printk("umc8672: drive speeds [0 to 11]: %d %d %d %d\n",
speeds[0], speeds[1], speeds[2], speeds[3]);
}
static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
+ ide_hwif_t *hwif = drive->hwif;
unsigned long flags;
- ide_hwgroup_t *hwgroup = ide_hwifs[HWIF(drive)->index^1].hwgroup;
printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
drive->name, pio, pio_to_umc[pio]);
spin_lock_irqsave(&ide_lock, flags);
- if (hwgroup && hwgroup->handler != NULL) {
+ if (hwif->mate && hwif->mate->hwgroup->handler) {
printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n");
} else {
current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
- umc_set_speeds (current_speeds);
+ umc_set_speeds(current_speeds);
}
spin_unlock_irqrestore(&ide_lock, flags);
}
+static const struct ide_port_ops umc8672_port_ops = {
+ .set_pio_mode = umc_set_pio_mode,
+};
+
static const struct ide_port_info umc8672_port_info __initdata = {
+ .name = DRV_NAME,
.chipset = ide_umc8672,
- .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE,
+ .port_ops = &umc8672_port_ops,
+ .host_flags = IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
};
static int __init umc8672_probe(void)
{
unsigned long flags;
- static u8 idx[4] = { 0, 1, 0xff, 0xff };
- hw_regs_t hw[2];
if (!request_region(0x108, 2, "umc8672")) {
printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n");
return 1;
}
local_irq_save(flags);
- outb_p(0x5A,0x108); /* enable umc */
+ outb_p(0x5A, 0x108); /* enable umc */
if (in_umc (0xd5) != 0xa0) {
local_irq_restore(flags);
printk(KERN_ERR "umc8672: not found\n");
release_region(0x108, 2);
- return 1;
+ return 1;
}
- outb_p(0xa5,0x108); /* disable umc */
+ outb_p(0xa5, 0x108); /* disable umc */
- umc_set_speeds (current_speeds);
+ umc_set_speeds(current_speeds);
local_irq_restore(flags);
- memset(&hw, 0, sizeof(hw));
-
- ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
- hw[0].irq = 14;
-
- ide_std_init_ports(&hw[1], 0x170, 0x376);
- hw[1].irq = 15;
-
- ide_init_port_hw(&ide_hwifs[0], &hw[0]);
- ide_init_port_hw(&ide_hwifs[1], &hw[1]);
-
- ide_hwifs[0].set_pio_mode = &umc_set_pio_mode;
- ide_hwifs[1].set_pio_mode = &umc_set_pio_mode;
-
- ide_device_add(idx, &umc8672_port_info);
-
- return 0;
+ return ide_legacy_device_add(&umc8672_port_info, 0);
}
-int probe_umc8672 = 0;
+static int probe_umc8672;
module_param_named(probe, probe_umc8672, bool, 0);
MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
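
Both VLB drivers in this series drop the same boilerplate: zeroing hw_regs_t, filling the standard 0x1f0/0x3f6 and 0x170/0x376 ports with IRQs 14/15, and calling ide_device_add() with a hand-built idx[] array. That is what ide_legacy_device_add() centralizes. A stand-in model of roughly what it does for the driver, with the details inferred from the removed code above:

#include <stdio.h>

struct hw_regs { unsigned long io, ctl; int irq; };

/* roughly what the helper fills in for a legacy VLB host */
static void legacy_device_add(const char *name)
{
	static const struct hw_regs std[2] = {
		{ 0x1f0, 0x3f6, 14 },	/* primary port   */
		{ 0x170, 0x376, 15 },	/* secondary port */
	};
	int i;

	for (i = 0; i < 2; i++)
		printf("%s: I/O %#lx, ctl %#lx, irq %d\n",
		       name, std[i].io, std[i].ctl, std[i].irq);
}

int main(void)
{
	legacy_device_add("umc8672");
	return 0;
}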
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 9b628248f2f..296b9c674ba 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -47,7 +47,6 @@
#define IDE_AU1XXX_BURSTMODE 1
static _auide_hwif auide_hwif;
-static int dbdma_init_done;
static int auide_ddma_init(_auide_hwif *auide);
@@ -61,7 +60,7 @@ void auide_insw(unsigned long port, void *addr, u32 count)
if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
DDMA_FLAGS_NOIE)) {
- printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
+ printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
return;
}
ctp = *((chan_tab_t **)ahwif->rx_chan);
@@ -79,7 +78,7 @@ void auide_outsw(unsigned long port, void *addr, u32 count)
if(!put_source_flags(ahwif->tx_chan, (void*)addr,
count << 1, DDMA_FLAGS_NOIE)) {
- printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
+ printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
return;
}
ctp = *((chan_tab_t **)ahwif->tx_chan);
@@ -250,7 +249,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
(void*) sg_virt(sg),
tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
- __FUNCTION__, __LINE__);
+ __func__, __LINE__);
}
} else
{
@@ -258,7 +257,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
(void*) sg_virt(sg),
tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
- __FUNCTION__, __LINE__);
+ __func__, __LINE__);
}
}
@@ -315,35 +314,6 @@ static int auide_dma_setup(ide_drive_t *drive)
return 0;
}
-static u8 auide_mdma_filter(ide_drive_t *drive)
-{
- /*
- * FIXME: ->white_list and ->black_list are based on completely bogus
- * ->ide_dma_check implementation which didn't set neither the host
- * controller timings nor the device for the desired transfer mode.
- *
- * They should be either removed or 0x00 MWDMA mask should be
- * returned for devices on the ->black_list.
- */
-
- if (dbdma_init_done == 0) {
- auide_hwif.white_list = ide_in_drive_list(drive->id,
- dma_white_list);
- auide_hwif.black_list = ide_in_drive_list(drive->id,
- dma_black_list);
- auide_hwif.drive = drive;
- auide_ddma_init(&auide_hwif);
- dbdma_init_done = 1;
- }
-
- /* Is the drive in our DMA black list? */
- if (auide_hwif.black_list)
- printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
- drive->name, drive->id->model);
-
- return drive->hwif->mwdma_mask;
-}
-
static int auide_dma_test_irq(ide_drive_t *drive)
{
if (drive->waiting_for_dma == 0)
@@ -396,41 +366,41 @@ static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 de
dev->dev_devwidth = devwidth;
dev->dev_flags = flags;
}
-
-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
static void auide_dma_timeout(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
- if (hwif->ide_dma_test_irq(drive))
+ if (auide_dma_test_irq(drive))
return;
- hwif->ide_dma_end(drive);
+ auide_dma_end(drive);
}
-
-static int auide_ddma_init(_auide_hwif *auide) {
-
+static const struct ide_dma_ops au1xxx_dma_ops = {
+ .dma_host_set = auide_dma_host_set,
+ .dma_setup = auide_dma_setup,
+ .dma_exec_cmd = auide_dma_exec_cmd,
+ .dma_start = auide_dma_start,
+ .dma_end = auide_dma_end,
+ .dma_test_irq = auide_dma_test_irq,
+ .dma_lost_irq = auide_dma_lost_irq,
+ .dma_timeout = auide_dma_timeout,
+};
+
+static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+ _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
dbdev_tab_t source_dev_tab, target_dev_tab;
u32 dev_id, tsize, devwidth, flags;
- ide_hwif_t *hwif = auide->hwif;
dev_id = AU1XXX_ATA_DDMA_REQ;
- if (auide->white_list || auide->black_list) {
- tsize = 8;
- devwidth = 32;
- }
- else {
- tsize = 1;
- devwidth = 16;
-
- printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model);
- printk(KERN_ERR " please read 'Documentation/mips/AU1xxx_IDE.README'");
- }
+ tsize = 8; /* 1 */
+ devwidth = 32; /* 16 */
#ifdef IDE_AU1XXX_BURSTMODE
flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
@@ -482,9 +452,9 @@ static int auide_ddma_init(_auide_hwif *auide) {
return 0;
}
#else
-
-static int auide_ddma_init( _auide_hwif *auide )
+static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
+ _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
dbdev_tab_t source_dev_tab;
int flags;
@@ -532,20 +502,28 @@ static int auide_ddma_init( _auide_hwif *auide )
static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
{
int i;
- unsigned long *ata_regs = hw->io_ports;
+ unsigned long *ata_regs = hw->io_ports_array;
/* FIXME? */
- for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
+ for (i = 0; i < 8; i++)
*ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
- }
/* set the Alternative Status register */
*ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
}
+static const struct ide_port_ops au1xxx_port_ops = {
+ .set_pio_mode = au1xxx_set_pio_mode,
+ .set_dma_mode = auide_set_dma_mode,
+};
+
static const struct ide_port_info au1xxx_port_info = {
+ .init_dma = auide_ddma_init,
+ .port_ops = &au1xxx_port_ops,
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+ .dma_ops = &au1xxx_dma_ops,
+#endif
.host_flags = IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
IDE_HFLAG_NO_IO_32BIT |
IDE_HFLAG_UNMASK_IRQS,
.pio_mask = ATA_PIO4,
@@ -599,9 +577,11 @@ static int au_ide_probe(struct device *dev)
goto out;
}
- /* FIXME: This might possibly break PCMCIA IDE devices */
-
- hwif = &ide_hwifs[pdev->id];
+ hwif = ide_find_port();
+ if (hwif == NULL) {
+ ret = -ENOENT;
+ goto out;
+ }
memset(&hw, 0, sizeof(hw));
auide_setup_ports(&hw, ahwif);
@@ -613,8 +593,6 @@ static int au_ide_probe(struct device *dev)
hwif->dev = dev;
- hwif->mmio = 1;
-
/* If the user has selected DDMA assisted copies,
then set up a few local I/O function entry points
*/
@@ -623,34 +601,12 @@ static int au_ide_probe(struct device *dev)
hwif->INSW = auide_insw;
hwif->OUTSW = auide_outsw;
#endif
-
- hwif->set_pio_mode = &au1xxx_set_pio_mode;
- hwif->set_dma_mode = &auide_set_dma_mode;
-
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
- hwif->dma_timeout = &auide_dma_timeout;
-
- hwif->mdma_filter = &auide_mdma_filter;
-
- hwif->dma_host_set = &auide_dma_host_set;
- hwif->dma_exec_cmd = &auide_dma_exec_cmd;
- hwif->dma_start = &auide_dma_start;
- hwif->ide_dma_end = &auide_dma_end;
- hwif->dma_setup = &auide_dma_setup;
- hwif->ide_dma_test_irq = &auide_dma_test_irq;
- hwif->dma_lost_irq = &auide_dma_lost_irq;
-#endif
hwif->select_data = 0; /* no chipset-specific code */
hwif->config_data = 0; /* no chipset-specific code */
auide_hwif.hwif = hwif;
hwif->hwif_data = &auide_hwif;
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
- auide_ddma_init(&auide_hwif);
- dbdma_init_done = 1;
-#endif
-
idx[0] = hwif->index;
ide_device_add(idx, &au1xxx_port_info);
@@ -670,7 +626,7 @@ static int au_ide_remove(struct device *dev)
ide_hwif_t *hwif = dev_get_drvdata(dev);
_auide_hwif *ahwif = &auide_hwif;
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
iounmap((void *)ahwif->regbase);
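
The auide_setup_ports() hunk renames io_ports to io_ports_array and fixes the loop bound to the eight task-file registers, with the alternate-status register at slot 14 of the register window. A sketch of the resulting layout, with a hypothetical base address and the shift value assumed:

#include <stdio.h>

#define REG_SHIFT 5	/* stand-in for AU1XXX_ATA_REG_OFFSET */

int main(void)
{
	unsigned long regbase = 0xac800000UL;	/* hypothetical MMIO base */
	unsigned long ata_regs[9];
	int i;

	for (i = 0; i < 8; i++)			/* data .. status */
		ata_regs[i] = regbase + ((unsigned long)i << REG_SHIFT);
	ata_regs[8] = regbase + (14UL << REG_SHIFT);	/* alt status */

	for (i = 0; i < 9; i++)
		printf("reg %d at %#lx\n", i, ata_regs[i]);
	return 0;
}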
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 956259fc09b..68947626e4a 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -76,17 +76,12 @@ static int __devinit swarm_ide_probe(struct device *dev)
if (!SIBYTE_HAVE_IDE)
return -ENODEV;
- /* Find an empty slot. */
- for (i = 0; i < MAX_HWIFS; i++)
- if (!ide_hwifs[i].io_ports[IDE_DATA_OFFSET])
- break;
- if (i >= MAX_HWIFS) {
+ hwif = ide_find_port();
+ if (hwif == NULL) {
printk(KERN_ERR DRV_NAME ": no free slot for interface\n");
return -ENOMEM;
}
- hwif = ide_hwifs + i;
-
base = ioremap(A_IO_EXT_BASE, 0x800);
offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
@@ -115,15 +110,13 @@ static int __devinit swarm_ide_probe(struct device *dev)
/* Setup MMIO ops. */
default_hwif_mmiops(hwif);
- /* Prevent resource map manipulation. */
- hwif->mmio = 1;
+
hwif->chipset = ide_generic;
- hwif->noprobe = 0;
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
- hwif->io_ports[i] =
+ for (i = 0; i <= 7; i++)
+ hwif->io_ports_array[i] =
(unsigned long)(base + ((0x1f0 + i) << 5));
- hwif->io_ports[IDE_CONTROL_OFFSET] =
+ hwif->io_ports.ctl_addr =
(unsigned long)(base + (0x3f6 << 5));
hwif->irq = K_INT_GB_IDE;
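
swarm.c picks up the same ide_find_port() helper as au1xxx-ide.c; the open-coded scan it replaces is visible in the removed lines. A user-space model of that scan, with stand-in structures (the real helper belongs to the IDE core):

#include <stdio.h>

#define MAX_HWIFS 4

struct hwif { unsigned long data_addr; };

static struct hwif hwifs[MAX_HWIFS];

/* the removed loop, factored into a helper as ide_find_port() now is */
static struct hwif *find_port(void)
{
	int i;

	for (i = 0; i < MAX_HWIFS; i++)
		if (hwifs[i].data_addr == 0)	/* unused slot */
			return &hwifs[i];
	return NULL;				/* all slots taken */
}

int main(void)
{
	struct hwif *h = find_port();

	printf("free slot: %d\n", h ? (int)(h - hwifs) : -1);
	return 0;
}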
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index cfb3265bc1a..7f46c224b7c 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -135,12 +135,12 @@ static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
- drive->hwif->set_dma_mode(drive, pio + XFER_PIO_0);
+ drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0);
}
static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name)
{
- int bus_speed = system_bus_clock();
+ int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
if (bus_speed <= 33)
pci_set_drvdata(dev, (void *) aec6xxx_33_base);
@@ -175,27 +175,23 @@ static u8 __devinit atp86x_cable_detect(ide_hwif_t *hwif)
return (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
-static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- hwif->set_pio_mode = &aec_set_pio_mode;
-
- if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF)
- hwif->set_dma_mode = &aec6210_set_mode;
- else {
- hwif->set_dma_mode = &aec6260_set_mode;
+static const struct ide_port_ops atp850_port_ops = {
+ .set_pio_mode = aec_set_pio_mode,
+ .set_dma_mode = aec6210_set_mode,
+};
- hwif->cable_detect = atp86x_cable_detect;
- }
-}
+static const struct ide_port_ops atp86x_port_ops = {
+ .set_pio_mode = aec_set_pio_mode,
+ .set_dma_mode = aec6260_set_mode,
+ .cable_detect = atp86x_cable_detect,
+};
static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
{ /* 0 */
.name = "AEC6210",
.init_chipset = init_chipset_aec62xx,
- .init_hwif = init_hwif_aec62xx,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
+ .port_ops = &atp850_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE |
IDE_HFLAG_NO_ATAPI_DMA |
IDE_HFLAG_NO_DSC |
@@ -207,7 +203,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
},{ /* 1 */
.name = "AEC6260",
.init_chipset = init_chipset_aec62xx,
- .init_hwif = init_hwif_aec62xx,
+ .port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
IDE_HFLAG_ABUSE_SET_DMA_MODE |
IDE_HFLAG_OFF_BOARD,
@@ -217,17 +213,18 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
},{ /* 2 */
.name = "AEC6260R",
.init_chipset = init_chipset_aec62xx,
- .init_hwif = init_hwif_aec62xx,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
+ .port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_ABUSE_SET_DMA_MODE,
+ IDE_HFLAG_ABUSE_SET_DMA_MODE |
+ IDE_HFLAG_NON_BOOTABLE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
},{ /* 3 */
.name = "AEC6280",
.init_chipset = init_chipset_aec62xx,
- .init_hwif = init_hwif_aec62xx,
+ .port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
IDE_HFLAG_ABUSE_SET_DMA_MODE |
IDE_HFLAG_OFF_BOARD,
@@ -237,8 +234,8 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
},{ /* 4 */
.name = "AEC6280R",
.init_chipset = init_chipset_aec62xx,
- .init_hwif = init_hwif_aec62xx,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
+ .port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
IDE_HFLAG_ABUSE_SET_DMA_MODE |
IDE_HFLAG_OFF_BOARD,
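
The init_chipset_aec62xx() hunk above introduces the clock-selection idiom repeated throughout this series: prefer a user-supplied ide_pci_clk (presumably set from the "idebus=" parameter; that mapping is an assumption here) and fall back to system_bus_clock() only when it is zero. Minimal model:

#include <stdio.h>

static int ide_pci_clk;	/* 0 = not overridden by the user */

static int system_bus_clock(void) { return 33; }	/* MHz, stand-in */

int main(void)
{
	int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();

	printf("PCI bus clock: %d MHz\n", bus_speed);	/* prints 33 */
	return 0;
}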
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index b3b6f514ce2..b36a22b8c21 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -38,8 +38,6 @@
#include <asm/io.h>
-#define DISPLAY_ALI_TIMINGS
-
/*
* ALi devices are not plug in. Otherwise these static values would
* need to go. They ought to go away anyway
@@ -49,236 +47,6 @@ static u8 m5229_revision;
static u8 chip_is_1543c_e;
static struct pci_dev *isa_dev;
-#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
-#include <linux/stat.h>
-#include <linux/proc_fs.h>
-
-static u8 ali_proc = 0;
-
-static struct pci_dev *bmide_dev;
-
-static char *fifo[4] = {
- "FIFO Off",
- "FIFO On ",
- "DMA mode",
- "PIO mode" };
-
-static char *udmaT[8] = {
- "1.5T",
- " 2T",
- "2.5T",
- " 3T",
- "3.5T",
- " 4T",
- " 6T",
- " 8T"
-};
-
-static char *channel_status[8] = {
- "OK ",
- "busy ",
- "DRQ ",
- "DRQ busy ",
- "error ",
- "error busy ",
- "error DRQ ",
- "error DRQ busy"
-};
-
-/**
- * ali_get_info - generate proc file for ALi IDE
- * @buffer: buffer to fill
- * @addr: address of user start in buffer
- * @offset: offset into 'file'
- * @count: buffer count
- *
- * Walks the Ali devices and outputs summary data on the tuning and
- * anything else that will help with debugging
- */
-
-static int ali_get_info (char *buffer, char **addr, off_t offset, int count)
-{
- unsigned long bibma;
- u8 reg53h, reg5xh, reg5yh, reg5xh1, reg5yh1, c0, c1, rev, tmp;
- char *q, *p = buffer;
-
- /* fetch rev. */
- pci_read_config_byte(bmide_dev, 0x08, &rev);
- if (rev >= 0xc1) /* M1543C or newer */
- udmaT[7] = " ???";
- else
- fifo[3] = " ??? ";
-
- /* first fetch bibma: */
-
- bibma = pci_resource_start(bmide_dev, 4);
-
- /*
- * at that point bibma+0x2 et bibma+0xa are byte
- * registers to investigate:
- */
- c0 = inb(bibma + 0x02);
- c1 = inb(bibma + 0x0a);
-
- p += sprintf(p,
- "\n Ali M15x3 Chipset.\n");
- p += sprintf(p,
- " ------------------\n");
- pci_read_config_byte(bmide_dev, 0x78, &reg53h);
- p += sprintf(p, "PCI Clock: %d.\n", reg53h);
-
- pci_read_config_byte(bmide_dev, 0x53, &reg53h);
- p += sprintf(p,
- "CD_ROM FIFO:%s, CD_ROM DMA:%s\n",
- (reg53h & 0x02) ? "Yes" : "No ",
- (reg53h & 0x01) ? "Yes" : "No " );
- pci_read_config_byte(bmide_dev, 0x74, &reg53h);
- p += sprintf(p,
- "FIFO Status: contains %d Words, runs%s%s\n\n",
- (reg53h & 0x3f),
- (reg53h & 0x40) ? " OVERWR" : "",
- (reg53h & 0x80) ? " OVERRD." : "." );
-
- p += sprintf(p,
- "-------------------primary channel"
- "-------------------secondary channel"
- "---------\n\n");
-
- pci_read_config_byte(bmide_dev, 0x09, &reg53h);
- p += sprintf(p,
- "channel status: %s"
- " %s\n",
- (reg53h & 0x20) ? "On " : "Off",
- (reg53h & 0x10) ? "On " : "Off" );
-
- p += sprintf(p,
- "both channels togth: %s"
- " %s\n",
- (c0&0x80) ? "No " : "Yes",
- (c1&0x80) ? "No " : "Yes" );
-
- pci_read_config_byte(bmide_dev, 0x76, &reg53h);
- p += sprintf(p,
- "Channel state: %s %s\n",
- channel_status[reg53h & 0x07],
- channel_status[(reg53h & 0x70) >> 4] );
-
- pci_read_config_byte(bmide_dev, 0x58, &reg5xh);
- pci_read_config_byte(bmide_dev, 0x5c, &reg5yh);
- p += sprintf(p,
- "Add. Setup Timing: %dT"
- " %dT\n",
- (reg5xh & 0x07) ? (reg5xh & 0x07) : 8,
- (reg5yh & 0x07) ? (reg5yh & 0x07) : 8 );
-
- pci_read_config_byte(bmide_dev, 0x59, &reg5xh);
- pci_read_config_byte(bmide_dev, 0x5d, &reg5yh);
- p += sprintf(p,
- "Command Act. Count: %dT"
- " %dT\n"
- "Command Rec. Count: %dT"
- " %dT\n\n",
- (reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8,
- (reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8,
- (reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16,
- (reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16 );
-
- p += sprintf(p,
- "----------------drive0-----------drive1"
- "------------drive0-----------drive1------\n\n");
- p += sprintf(p,
- "DMA enabled: %s %s"
- " %s %s\n",
- (c0&0x20) ? "Yes" : "No ",
- (c0&0x40) ? "Yes" : "No ",
- (c1&0x20) ? "Yes" : "No ",
- (c1&0x40) ? "Yes" : "No " );
-
- pci_read_config_byte(bmide_dev, 0x54, &reg5xh);
- pci_read_config_byte(bmide_dev, 0x55, &reg5yh);
- q = "FIFO threshold: %2d Words %2d Words"
- " %2d Words %2d Words\n";
- if (rev < 0xc1) {
- if ((rev == 0x20) &&
- (pci_read_config_byte(bmide_dev, 0x4f, &tmp), (tmp &= 0x20))) {
- p += sprintf(p, q, 8, 8, 8, 8);
- } else {
- p += sprintf(p, q,
- (reg5xh & 0x03) + 12,
- ((reg5xh & 0x30)>>4) + 12,
- (reg5yh & 0x03) + 12,
- ((reg5yh & 0x30)>>4) + 12 );
- }
- } else {
- int t1 = (tmp = (reg5xh & 0x03)) ? (tmp << 3) : 4;
- int t2 = (tmp = ((reg5xh & 0x30)>>4)) ? (tmp << 3) : 4;
- int t3 = (tmp = (reg5yh & 0x03)) ? (tmp << 3) : 4;
- int t4 = (tmp = ((reg5yh & 0x30)>>4)) ? (tmp << 3) : 4;
- p += sprintf(p, q, t1, t2, t3, t4);
- }
-
-#if 0
- p += sprintf(p,
- "FIFO threshold: %2d Words %2d Words"
- " %2d Words %2d Words\n",
- (reg5xh & 0x03) + 12,
- ((reg5xh & 0x30)>>4) + 12,
- (reg5yh & 0x03) + 12,
- ((reg5yh & 0x30)>>4) + 12 );
-#endif
-
- p += sprintf(p,
- "FIFO mode: %s %s %s %s\n",
- fifo[((reg5xh & 0x0c) >> 2)],
- fifo[((reg5xh & 0xc0) >> 6)],
- fifo[((reg5yh & 0x0c) >> 2)],
- fifo[((reg5yh & 0xc0) >> 6)] );
-
- pci_read_config_byte(bmide_dev, 0x5a, &reg5xh);
- pci_read_config_byte(bmide_dev, 0x5b, &reg5xh1);
- pci_read_config_byte(bmide_dev, 0x5e, &reg5yh);
- pci_read_config_byte(bmide_dev, 0x5f, &reg5yh1);
-
- p += sprintf(p,/*
- "------------------drive0-----------drive1"
- "------------drive0-----------drive1------\n")*/
- "Dt RW act. Cnt %2dT %2dT"
- " %2dT %2dT\n"
- "Dt RW rec. Cnt %2dT %2dT"
- " %2dT %2dT\n\n",
- (reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8,
- (reg5xh1 & 0x70) ? ((reg5xh1 & 0x70) >> 4) : 8,
- (reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8,
- (reg5yh1 & 0x70) ? ((reg5yh1 & 0x70) >> 4) : 8,
- (reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16,
- (reg5xh1 & 0x0f) ? (reg5xh1 & 0x0f) : 16,
- (reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16,
- (reg5yh1 & 0x0f) ? (reg5yh1 & 0x0f) : 16 );
-
- p += sprintf(p,
- "-----------------------------------UDMA Timings"
- "--------------------------------\n\n");
-
- pci_read_config_byte(bmide_dev, 0x56, &reg5xh);
- pci_read_config_byte(bmide_dev, 0x57, &reg5yh);
- p += sprintf(p,
- "UDMA: %s %s"
- " %s %s\n"
- "UDMA timings: %s %s"
- " %s %s\n\n",
- (reg5xh & 0x08) ? "OK" : "No",
- (reg5xh & 0x80) ? "OK" : "No",
- (reg5yh & 0x08) ? "OK" : "No",
- (reg5yh & 0x80) ? "OK" : "No",
- udmaT[(reg5xh & 0x07)],
- udmaT[(reg5xh & 0x70) >> 4],
- udmaT[reg5yh & 0x07],
- udmaT[(reg5yh & 0x70) >> 4] );
-
- return p-buffer; /* => must be less than 4k! */
-}
-#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
-
/**
* ali_set_pio_mode - set host controller for PIO mode
* @drive: drive
@@ -294,7 +62,7 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
int s_time, a_time, c_time;
u8 s_clc, a_clc, r_clc;
unsigned long flags;
- int bus_speed = system_bus_clock();
+ int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
int port = hwif->channel ? 0x5c : 0x58;
int portFIFO = hwif->channel ? 0x55 : 0x54;
u8 cd_dma_fifo = 0;
@@ -465,14 +233,6 @@ static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const c
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
- if (!ali_proc) {
- ali_proc = 1;
- bmide_dev = dev;
- ide_pci_create_host_proc("ali", ali_get_info);
- }
-#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
-
local_irq_save(flags);
if (m5229_revision < 0xC2) {
@@ -610,7 +370,7 @@ static int ali_cable_override(struct pci_dev *pdev)
}
/**
- * ata66_ali15x3 - check for UDMA 66 support
+ * ali_cable_detect - cable detection
* @hwif: IDE interface
*
* This checks if the controller and the cable are capable
@@ -620,7 +380,7 @@ static int ali_cable_override(struct pci_dev *pdev)
 * FIXME: frobs bits that are not defined on newer ALi devices
*/
-static u8 __devinit ata66_ali15x3(ide_hwif_t *hwif)
+static u8 __devinit ali_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long flags;
@@ -652,27 +412,7 @@ static u8 __devinit ata66_ali15x3(ide_hwif_t *hwif)
return cbl;
}
-/**
- * init_hwif_common_ali15x3 - Set up ALI IDE hardware
- * @hwif: IDE interface
- *
- * Initialize the IDE structure side of the ALi 15x3 driver.
- */
-
-static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &ali_set_pio_mode;
- hwif->set_dma_mode = &ali_set_dma_mode;
- hwif->udma_filter = &ali_udma_filter;
-
- hwif->cable_detect = ata66_ali15x3;
-
- if (hwif->dma_base == 0)
- return;
-
- hwif->dma_setup = &ali15x3_dma_setup;
-}
-
+#ifndef CONFIG_SPARC64
/**
* init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
* @hwif: interface to configure
@@ -722,35 +462,66 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
if(irq >= 0)
hwif->irq = irq;
}
-
- init_hwif_common_ali15x3(hwif);
}
+#endif
/**
* init_dma_ali15x3 - set up DMA on ALi15x3
* @hwif: IDE interface
- * @dmabase: DMA interface base PCI address
+ * @d: IDE port info
*
- * Set up the DMA functionality on the ALi 15x3. For the ALi
- * controllers this is generic so we can let the generic code do
- * the actual work.
+ * Set up the DMA functionality on the ALi 15x3.
*/
-static void __devinit init_dma_ali15x3 (ide_hwif_t *hwif, unsigned long dmabase)
+static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
+ const struct ide_port_info *d)
{
- if (m5229_revision < 0x20)
- return;
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ unsigned long base = ide_pci_dma_base(hwif, d);
+
+ if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+ return -1;
+
if (!hwif->channel)
- outb(inb(dmabase + 2) & 0x60, dmabase + 2);
- ide_setup_dma(hwif, dmabase);
+ outb(inb(base + 2) & 0x60, base + 2);
+
+ printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
+ hwif->name, base, base + 7);
+
+ if (ide_allocate_dma_engine(hwif))
+ return -1;
+
+ ide_setup_dma(hwif, base);
+
+ return 0;
}
+static const struct ide_port_ops ali_port_ops = {
+ .set_pio_mode = ali_set_pio_mode,
+ .set_dma_mode = ali_set_dma_mode,
+ .udma_filter = ali_udma_filter,
+ .cable_detect = ali_cable_detect,
+};
+
+static const struct ide_dma_ops ali_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ali15x3_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = __ide_dma_end,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
+
static const struct ide_port_info ali15x3_chipset __devinitdata = {
.name = "ALI15X3",
.init_chipset = init_chipset_ali15x3,
+#ifndef CONFIG_SPARC64
.init_hwif = init_hwif_ali15x3,
+#endif
.init_dma = init_dma_ali15x3,
- .host_flags = IDE_HFLAG_BOOTABLE,
+ .port_ops = &ali_port_ops,
.pio_mask = ATA_PIO5,
.swdma_mask = ATA_SWDMA2,
.mwdma_mask = ATA_MWDMA2,
@@ -793,14 +564,17 @@ static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_dev
d.udma_mask = ATA_UDMA5;
else
d.udma_mask = ATA_UDMA6;
+
+ d.dma_ops = &ali_dma_ops;
+ } else {
+ d.host_flags |= IDE_HFLAG_NO_DMA;
+
+ d.mwdma_mask = d.swdma_mask = 0;
}
if (idx == 0)
d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
-#if defined(CONFIG_SPARC64)
- d.init_hwif = init_hwif_common_ali15x3;
-#endif /* CONFIG_SPARC64 */
return ide_setup_pci_device(dev, &d);
}
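
init_dma_ali15x3() also shows the new init_dma contract: the hook takes the port info, returns 0 or -1, and does its own ide_pci_dma_base()/ide_allocate_dma_engine() error handling rather than being a void callout. A stand-in sketch of that control flow, with the helper behavior faked:

#include <stdio.h>

static unsigned long pci_dma_base(void) { return 0xf000; }	/* fake BAR */
static int allocate_dma_engine(void) { return 0; }	/* pretend it works */

static int init_dma(const char *name)
{
	unsigned long base = pci_dma_base();

	if (base == 0)
		return -1;		/* no BM-DMA resources */
	if (allocate_dma_engine())
		return -1;		/* PRD table allocation failed */
	printf("%s: BM-DMA at 0x%04lx-0x%04lx\n", name, base, base + 7);
	return 0;
}

int main(void)
{
	return init_dma("ALI15X3") ? 1 : 0;
}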
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 2ef890ce809..efcf54338be 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -179,7 +179,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev,
* Determine the system bus clock.
*/
- amd_clock = system_bus_clock() * 1000;
+ amd_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000;
switch (amd_clock) {
case 33000: amd_clock = 33333; break;
@@ -210,21 +210,20 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
if (hwif->irq == 0) /* 0 is bogus but will do for now */
hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel);
-
- hwif->set_pio_mode = &amd_set_pio_mode;
- hwif->set_dma_mode = &amd_set_drive;
-
- hwif->cable_detect = amd_cable_detect;
}
+static const struct ide_port_ops amd_port_ops = {
+ .set_pio_mode = amd_set_pio_mode,
+ .set_dma_mode = amd_set_drive,
+ .cable_detect = amd_cable_detect,
+};
+
#define IDE_HFLAGS_AMD \
(IDE_HFLAG_PIO_NO_BLACKLIST | \
- IDE_HFLAG_PIO_NO_DOWNGRADE | \
IDE_HFLAG_ABUSE_SET_DMA_MODE | \
IDE_HFLAG_POST_SET_MODE | \
IDE_HFLAG_IO_32BIT | \
- IDE_HFLAG_UNMASK_IRQS | \
- IDE_HFLAG_BOOTABLE)
+ IDE_HFLAG_UNMASK_IRQS)
#define DECLARE_AMD_DEV(name_str, swdma, udma) \
{ \
@@ -232,6 +231,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
.init_chipset = init_chipset_amd74xx, \
.init_hwif = init_hwif_amd74xx, \
.enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \
+ .port_ops = &amd_port_ops, \
.host_flags = IDE_HFLAGS_AMD, \
.pio_mask = ATA_PIO5, \
.swdma_mask = swdma, \
@@ -245,6 +245,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
.init_chipset = init_chipset_amd74xx, \
.init_hwif = init_hwif_amd74xx, \
.enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \
+ .port_ops = &amd_port_ops, \
.host_flags = IDE_HFLAGS_AMD, \
.pio_mask = ATA_PIO5, \
.swdma_mask = ATA_SWDMA2, \
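
The amd74xx clock hunk scales the selected bus clock to kHz and then snaps round numbers to exact rates, as the case 33000 -> 33333 line shows. A sketch of that normalization; only the 33000 case is visible in the diff, the others are assumptions:

#include <stdio.h>

int main(void)
{
	int amd_clock = 33 * 1000;	/* kHz, from ide_pci_clk or probe */

	switch (amd_clock) {
	case 33000: amd_clock = 33333; break;	/* visible in the hunk */
	case 37000: amd_clock = 37500; break;	/* assumed extra case  */
	case 41000: amd_clock = 41666; break;	/* assumed extra case  */
	}
	printf("amd_clock = %d kHz\n", amd_clock);
	return 0;
}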
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 7e037c880cb..8b637181681 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -130,37 +130,26 @@ static u8 __devinit atiixp_cable_detect(ide_hwif_t *hwif)
return ATA_CBL_PATA40;
}
-/**
- * init_hwif_atiixp - fill in the hwif for the ATIIXP
- * @hwif: IDE interface
- *
- * Set up the ide_hwif_t for the ATIIXP interface according to the
- * capabilities of the hardware.
- */
-
-static void __devinit init_hwif_atiixp(ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &atiixp_set_pio_mode;
- hwif->set_dma_mode = &atiixp_set_dma_mode;
-
- hwif->cable_detect = atiixp_cable_detect;
-}
+static const struct ide_port_ops atiixp_port_ops = {
+ .set_pio_mode = atiixp_set_pio_mode,
+ .set_dma_mode = atiixp_set_dma_mode,
+ .cable_detect = atiixp_cable_detect,
+};
static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
{ /* 0 */
.name = "ATIIXP",
- .init_hwif = init_hwif_atiixp,
.enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
- .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE,
+ .port_ops = &atiixp_port_ops,
+ .host_flags = IDE_HFLAG_LEGACY_IRQS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
},{ /* 1 */
.name = "SB600_PATA",
- .init_hwif = init_hwif_atiixp,
.enablebits = {{0x48,0x01,0x00}, {0x00,0x00,0x00}},
- .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS |
- IDE_HFLAG_BOOTABLE,
+ .port_ops = &atiixp_port_ops,
+ .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index a1cfe033a55..aaf38109eae 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -4,7 +4,7 @@
/*
* Original authors: abramov@cecmow.enet.dec.com (Igor Abramov)
- * mlord@pobox.com (Mark Lord)
+ * mlord@pobox.com (Mark Lord)
*
* See linux/MAINTAINERS for address of current maintainer.
*
@@ -98,7 +98,7 @@
#define CMD640_PREFETCH_MASKS 1
-//#define CMD640_DUMP_REGS
+/*#define CMD640_DUMP_REGS */
#include <linux/types.h>
#include <linux/kernel.h>
@@ -109,10 +109,9 @@
#include <asm/io.h>
-/*
- * This flag is set in ide.c by the parameter: ide0=cmd640_vlb
- */
-int cmd640_vlb = 0;
+#define DRV_NAME "cmd640"
+
+static int cmd640_vlb;
/*
* CMD640 specific registers definition.
@@ -185,7 +184,6 @@ static DEFINE_SPINLOCK(cmd640_lock);
* These are initialized to point at the devices we control
*/
static ide_hwif_t *cmd_hwif0, *cmd_hwif1;
-static ide_drive_t *cmd_drives[4];
/*
* Interface to access cmd640x registers
@@ -207,13 +205,13 @@ static unsigned int cmd640_chip_version;
/* PCI method 1 access */
-static void put_cmd640_reg_pci1 (u16 reg, u8 val)
+static void put_cmd640_reg_pci1(u16 reg, u8 val)
{
outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
outb_p(val, (reg & 3) | 0xcfc);
}
-static u8 get_cmd640_reg_pci1 (u16 reg)
+static u8 get_cmd640_reg_pci1(u16 reg)
{
outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
return inb_p((reg & 3) | 0xcfc);
@@ -221,14 +219,14 @@ static u8 get_cmd640_reg_pci1 (u16 reg)
/* PCI method 2 access (from CMD datasheet) */
-static void put_cmd640_reg_pci2 (u16 reg, u8 val)
+static void put_cmd640_reg_pci2(u16 reg, u8 val)
{
outb_p(0x10, 0xcf8);
outb_p(val, cmd640_key + reg);
outb_p(0, 0xcf8);
}
-static u8 get_cmd640_reg_pci2 (u16 reg)
+static u8 get_cmd640_reg_pci2(u16 reg)
{
u8 b;
@@ -240,13 +238,13 @@ static u8 get_cmd640_reg_pci2 (u16 reg)
/* VLB access */
-static void put_cmd640_reg_vlb (u16 reg, u8 val)
+static void put_cmd640_reg_vlb(u16 reg, u8 val)
{
outb_p(reg, cmd640_key);
outb_p(val, cmd640_key + 4);
}
-static u8 get_cmd640_reg_vlb (u16 reg)
+static u8 get_cmd640_reg_vlb(u16 reg)
{
outb_p(reg, cmd640_key);
return inb_p(cmd640_key + 4);
@@ -268,11 +266,11 @@ static void put_cmd640_reg(u16 reg, u8 val)
unsigned long flags;
spin_lock_irqsave(&cmd640_lock, flags);
- __put_cmd640_reg(reg,val);
+ __put_cmd640_reg(reg, val);
spin_unlock_irqrestore(&cmd640_lock, flags);
}
-static int __init match_pci_cmd640_device (void)
+static int __init match_pci_cmd640_device(void)
{
const u8 ven_dev[4] = {0x95, 0x10, 0x40, 0x06};
unsigned int i;
@@ -292,7 +290,7 @@ static int __init match_pci_cmd640_device (void)
/*
* Probe for CMD640x -- pci method 1
*/
-static int __init probe_for_cmd640_pci1 (void)
+static int __init probe_for_cmd640_pci1(void)
{
__get_cmd640_reg = get_cmd640_reg_pci1;
__put_cmd640_reg = put_cmd640_reg_pci1;
@@ -308,7 +306,7 @@ static int __init probe_for_cmd640_pci1 (void)
/*
* Probe for CMD640x -- pci method 2
*/
-static int __init probe_for_cmd640_pci2 (void)
+static int __init probe_for_cmd640_pci2(void)
{
__get_cmd640_reg = get_cmd640_reg_pci2;
__put_cmd640_reg = put_cmd640_reg_pci2;
@@ -322,7 +320,7 @@ static int __init probe_for_cmd640_pci2 (void)
/*
* Probe for CMD640x -- vlb
*/
-static int __init probe_for_cmd640_vlb (void)
+static int __init probe_for_cmd640_vlb(void)
{
u8 b;
@@ -343,18 +341,18 @@ static int __init probe_for_cmd640_vlb (void)
* Returns 1 if an IDE interface/drive exists at 0x170,
* Returns 0 otherwise.
*/
-static int __init secondary_port_responding (void)
+static int __init secondary_port_responding(void)
{
unsigned long flags;
spin_lock_irqsave(&cmd640_lock, flags);
- outb_p(0x0a, 0x170 + IDE_SELECT_OFFSET); /* select drive0 */
+ outb_p(0x0a, 0x176); /* select drive0 */
udelay(100);
- if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x0a) {
- outb_p(0x1a, 0x170 + IDE_SELECT_OFFSET); /* select drive1 */
+ if ((inb_p(0x176) & 0x1f) != 0x0a) {
+ outb_p(0x1a, 0x176); /* select drive1 */
udelay(100);
- if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x1a) {
+ if ((inb_p(0x176) & 0x1f) != 0x1a) {
spin_unlock_irqrestore(&cmd640_lock, flags);
return 0; /* nothing responded */
}
@@ -367,7 +365,7 @@ static int __init secondary_port_responding (void)
/*
* Dump out all cmd640 registers. May be called from ide.c
*/
-static void cmd640_dump_regs (void)
+static void cmd640_dump_regs(void)
{
unsigned int reg = cmd640_vlb ? 0x50 : 0x00;
@@ -382,13 +380,13 @@ static void cmd640_dump_regs (void)
}
#endif
+#ifndef CONFIG_BLK_DEV_CMD640_ENHANCED
/*
* Check whether prefetch is on for a drive,
* and initialize the unmask flags for safe operation.
*/
-static void __init check_prefetch (unsigned int index)
+static void __init check_prefetch(ide_drive_t *drive, unsigned int index)
{
- ide_drive_t *drive = cmd_drives[index];
u8 b = get_cmd640_reg(prefetch_regs[index]);
if (b & prefetch_masks[index]) { /* is prefetch off? */
@@ -403,29 +401,12 @@ static void __init check_prefetch (unsigned int index)
drive->no_io_32bit = 0;
}
}
-
-/*
- * Figure out which devices we control
- */
-static void __init setup_device_ptrs (void)
-{
- cmd_hwif0 = &ide_hwifs[0];
- cmd_hwif1 = &ide_hwifs[1];
-
- cmd_drives[0] = &cmd_hwif0->drives[0];
- cmd_drives[1] = &cmd_hwif0->drives[1];
- cmd_drives[2] = &cmd_hwif1->drives[0];
- cmd_drives[3] = &cmd_hwif1->drives[1];
-}
-
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-
+#else
/*
* Sets prefetch mode for a drive.
*/
-static void set_prefetch_mode (unsigned int index, int mode)
+static void set_prefetch_mode(ide_drive_t *drive, unsigned int index, int mode)
{
- ide_drive_t *drive = cmd_drives[index];
unsigned long flags;
int reg = prefetch_regs[index];
u8 b;
@@ -452,7 +433,7 @@ static void set_prefetch_mode (unsigned int index, int mode)
/*
* Dump out current drive clocks settings
*/
-static void display_clocks (unsigned int index)
+static void display_clocks(unsigned int index)
{
u8 active_count, recovery_count;
@@ -471,44 +452,16 @@ static void display_clocks (unsigned int index)
* Pack active and recovery counts into single byte representation
* used by controller
*/
-static inline u8 pack_nibbles (u8 upper, u8 lower)
+static inline u8 pack_nibbles(u8 upper, u8 lower)
{
return ((upper & 0x0f) << 4) | (lower & 0x0f);
}
/*
- * This routine retrieves the initial drive timings from the chipset.
- */
-static void __init retrieve_drive_counts (unsigned int index)
-{
- u8 b;
-
- /*
- * Get the internal setup timing, and convert to clock count
- */
- b = get_cmd640_reg(arttim_regs[index]) & ~0x3f;
- switch (b) {
- case 0x00: b = 4; break;
- case 0x80: b = 3; break;
- case 0x40: b = 2; break;
- default: b = 5; break;
- }
- setup_counts[index] = b;
-
- /*
- * Get the active/recovery counts
- */
- b = get_cmd640_reg(drwtim_regs[index]);
- active_counts[index] = (b >> 4) ? (b >> 4) : 0x10;
- recovery_counts[index] = (b & 0x0f) ? (b & 0x0f) : 0x10;
-}
-
-
-/*
* This routine writes the prepared setup/active/recovery counts
* for a drive into the cmd640 chipset registers to active them.
*/
-static void program_drive_counts (unsigned int index)
+static void program_drive_counts(ide_drive_t *drive, unsigned int index)
{
unsigned long flags;
u8 setup_count = setup_counts[index];
@@ -522,8 +475,11 @@ static void program_drive_counts (unsigned int index)
* so we merge the timings, using the slowest value for each timing.
*/
if (index > 1) {
- unsigned int mate;
- if (cmd_drives[mate = index ^ 1]->present) {
+ ide_hwif_t *hwif = drive->hwif;
+ ide_drive_t *peer = &hwif->drives[!drive->select.b.unit];
+ unsigned int mate = index ^ 1;
+
+ if (peer->present) {
if (setup_count < setup_counts[mate])
setup_count = setup_counts[mate];
if (active_count < active_counts[mate])
@@ -537,11 +493,11 @@ static void program_drive_counts (unsigned int index)
* Convert setup_count to internal chipset representation
*/
switch (setup_count) {
- case 4: setup_count = 0x00; break;
- case 3: setup_count = 0x80; break;
- case 1:
- case 2: setup_count = 0x40; break;
- default: setup_count = 0xc0; /* case 5 */
+ case 4: setup_count = 0x00; break;
+ case 3: setup_count = 0x80; break;
+ case 1:
+ case 2: setup_count = 0x40; break;
+ default: setup_count = 0xc0; /* case 5 */
}
/*
@@ -562,11 +518,19 @@ static void program_drive_counts (unsigned int index)
/*
* Set a specific pio_mode for a drive
*/
-static void cmd640_set_mode (unsigned int index, u8 pio_mode, unsigned int cycle_time)
+static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
+ u8 pio_mode, unsigned int cycle_time)
{
int setup_time, active_time, recovery_time, clock_time;
u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count;
- int bus_speed = system_bus_clock();
+ int bus_speed;
+
+ if (cmd640_vlb && ide_vlb_clk)
+ bus_speed = ide_vlb_clk;
+ else if (!cmd640_vlb && ide_pci_clk)
+ bus_speed = ide_pci_clk;
+ else
+ bus_speed = system_bus_clock();
if (pio_mode > 5)
pio_mode = 5;
@@ -574,15 +538,15 @@ static void cmd640_set_mode (unsigned int index, u8 pio_mode, unsigned int cycle
active_time = ide_pio_timings[pio_mode].active_time;
recovery_time = cycle_time - (setup_time + active_time);
clock_time = 1000 / bus_speed;
- cycle_count = (cycle_time + clock_time - 1) / clock_time;
+ cycle_count = DIV_ROUND_UP(cycle_time, clock_time);
- setup_count = (setup_time + clock_time - 1) / clock_time;
+ setup_count = DIV_ROUND_UP(setup_time, clock_time);
- active_count = (active_time + clock_time - 1) / clock_time;
+ active_count = DIV_ROUND_UP(active_time, clock_time);
if (active_count < 2)
active_count = 2; /* minimum allowed by cmd640 */
- recovery_count = (recovery_time + clock_time - 1) / clock_time;
+ recovery_count = DIV_ROUND_UP(recovery_time, clock_time);
recovery_count2 = cycle_count - (setup_count + active_count);
if (recovery_count2 > recovery_count)
recovery_count = recovery_count2;
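
The cmd640_set_mode() hunk does two independent things: pick the clock source by bus type (ide_vlb_clk for VLB, ide_pci_clk for PCI, system_bus_clock() as the fallback) and replace the open-coded round-up divisions with DIV_ROUND_UP(). The two forms are arithmetically identical, which this worked check confirms for a 33 MHz bus (clock_time = 1000/33 = 30 ns):

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int clock_time = 1000 / 33;	/* ns per clock at 33 MHz */

	assert(DIV_ROUND_UP(120, clock_time) == 4);	/* 120 ns cycle */
	assert(DIV_ROUND_UP(121, clock_time) == 5);	/* rounds up    */
	return 0;
}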
@@ -611,7 +575,7 @@ static void cmd640_set_mode (unsigned int index, u8 pio_mode, unsigned int cycle
* 1) this is the wrong place to do it (proper is do_special() in ide.c)
* 2) in practice this is rarely, if ever, necessary
*/
- program_drive_counts (index);
+ program_drive_counts(drive, index);
}
static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
@@ -619,32 +583,26 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
unsigned int index = 0, cycle_time;
u8 b;
- while (drive != cmd_drives[index]) {
- if (++index > 3) {
- printk(KERN_ERR "%s: bad news in %s\n",
- drive->name, __FUNCTION__);
- return;
- }
- }
switch (pio) {
- case 6: /* set fast-devsel off */
- case 7: /* set fast-devsel on */
- b = get_cmd640_reg(CNTRL) & ~0x27;
- if (pio & 1)
- b |= 0x27;
- put_cmd640_reg(CNTRL, b);
- printk("%s: %sabled cmd640 fast host timing (devsel)\n", drive->name, (pio & 1) ? "en" : "dis");
- return;
-
- case 8: /* set prefetch off */
- case 9: /* set prefetch on */
- set_prefetch_mode(index, pio & 1);
- printk("%s: %sabled cmd640 prefetch\n", drive->name, (pio & 1) ? "en" : "dis");
- return;
+ case 6: /* set fast-devsel off */
+ case 7: /* set fast-devsel on */
+ b = get_cmd640_reg(CNTRL) & ~0x27;
+ if (pio & 1)
+ b |= 0x27;
+ put_cmd640_reg(CNTRL, b);
+ printk("%s: %sabled cmd640 fast host timing (devsel)\n",
+ drive->name, (pio & 1) ? "en" : "dis");
+ return;
+ case 8: /* set prefetch off */
+ case 9: /* set prefetch on */
+ set_prefetch_mode(drive, index, pio & 1);
+ printk("%s: %sabled cmd640 prefetch\n",
+ drive->name, (pio & 1) ? "en" : "dis");
+ return;
}
cycle_time = ide_pio_cycle_time(drive, pio);
- cmd640_set_mode(index, pio, cycle_time);
+ cmd640_set_mode(drive, index, pio, cycle_time);
printk("%s: selected cmd640 PIO mode%d (%dns)",
drive->name, pio, cycle_time);
@@ -652,6 +610,9 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
display_clocks(index);
}
+static const struct ide_port_ops cmd640_port_ops = {
+ .set_pio_mode = cmd640_set_pio_mode,
+};
#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
static int pci_conf1(void)
@@ -693,14 +654,32 @@ static const struct ide_port_info cmd640_port_info __initdata = {
.chipset = ide_cmd640,
.host_flags = IDE_HFLAG_SERIALIZE |
IDE_HFLAG_NO_DMA |
- IDE_HFLAG_NO_AUTOTUNE |
IDE_HFLAG_ABUSE_PREFETCH |
IDE_HFLAG_ABUSE_FAST_DEVSEL,
#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ .port_ops = &cmd640_port_ops,
.pio_mask = ATA_PIO5,
#endif
};
+static int cmd640x_init_one(unsigned long base, unsigned long ctl)
+{
+ if (!request_region(base, 8, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+ DRV_NAME, base, base + 7);
+ return -EBUSY;
+ }
+
+ if (!request_region(ctl, 1, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+ DRV_NAME, ctl);
+ release_region(base, 8);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
/*
* Probe for a cmd640 chipset, and initialize it if found.
*/
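
cmd640x_init_one() claims the task-file block and the control port as two separate I/O regions, and the ordering matters: a failure on the second claim must release the first before bailing out. A stand-in model of that rollback:

#include <stdio.h>

static int request_region_ok(unsigned long start, int len)
{
	printf("claim %#lx +%d\n", start, len);
	return 1;	/* pretend it succeeded */
}

static void release_region(unsigned long start, int len)
{
	printf("release %#lx +%d\n", start, len);
}

static int init_one(unsigned long base, unsigned long ctl)
{
	if (!request_region_ok(base, 8))
		return -1;
	if (!request_region_ok(ctl, 1)) {
		release_region(base, 8);	/* undo the first claim */
		return -1;
	}
	return 0;
}

int main(void)
{
	return init_one(0x1f0, 0x3f6) ? 1 : 0;
}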
@@ -709,7 +688,7 @@ static int __init cmd640x_init(void)
#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
int second_port_toggled = 0;
#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
- int second_port_cmd640 = 0;
+ int second_port_cmd640 = 0, rc;
const char *bus_type, *port2;
unsigned int index;
u8 b, cfr;
@@ -749,10 +728,21 @@ static int __init cmd640x_init(void)
cfr = get_cmd640_reg(CFR);
cmd640_chip_version = cfr & CFR_DEVREV;
if (cmd640_chip_version == 0) {
- printk ("ide: bad cmd640 revision: %d\n", cmd640_chip_version);
+ printk("ide: bad cmd640 revision: %d\n", cmd640_chip_version);
return 0;
}
+ rc = cmd640x_init_one(0x1f0, 0x3f6);
+ if (rc)
+ return rc;
+
+ rc = cmd640x_init_one(0x170, 0x376);
+ if (rc) {
+ release_region(0x3f6, 1);
+ release_region(0x1f0, 8);
+ return rc;
+ }
+
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
@@ -764,17 +754,15 @@ static int __init cmd640x_init(void)
printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
"\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
+ cmd_hwif0 = ide_find_port();
+
/*
* Initialize data for primary port
*/
- setup_device_ptrs ();
-
- ide_init_port_hw(cmd_hwif0, &hw[0]);
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
- cmd_hwif0->set_pio_mode = &cmd640_set_pio_mode;
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-
- idx[0] = cmd_hwif0->index;
+ if (cmd_hwif0) {
+ ide_init_port_hw(cmd_hwif0, &hw[0]);
+ idx[0] = cmd_hwif0->index;
+ }
/*
* Ensure compatibility by always using the slowest timings
@@ -786,10 +774,13 @@ static int __init cmd640x_init(void)
put_cmd640_reg(CMDTIM, 0);
put_cmd640_reg(BRST, 0x40);
+ cmd_hwif1 = ide_find_port();
+
/*
* Try to enable the secondary interface, if not already enabled
*/
- if (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe) {
+ if (cmd_hwif1 &&
+ cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe) {
port2 = "not probed";
} else {
b = get_cmd640_reg(CNTRL);
@@ -820,15 +811,11 @@ static int __init cmd640x_init(void)
/*
* Initialize data for secondary cmd640 port, if enabled
*/
- if (second_port_cmd640) {
+ if (second_port_cmd640 && cmd_hwif1) {
ide_init_port_hw(cmd_hwif1, &hw[1]);
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
- cmd_hwif1->set_pio_mode = &cmd640_set_pio_mode;
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-
idx[1] = cmd_hwif1->index;
}
- printk(KERN_INFO "%s: %sserialized, secondary interface %s\n", cmd_hwif1->name,
+ printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
second_port_cmd640 ? "" : "not ", port2);
/*
@@ -836,35 +823,34 @@ static int __init cmd640x_init(void)
* Do not unnecessarily disturb any prior BIOS setup of these.
*/
for (index = 0; index < (2 + (second_port_cmd640 << 1)); index++) {
- ide_drive_t *drive = cmd_drives[index];
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
- if (drive->autotune || ((index > 1) && second_port_toggled)) {
- /*
- * Reset timing to the slowest speed and turn off prefetch.
- * This way, the drive identify code has a better chance.
- */
- setup_counts [index] = 4; /* max possible */
- active_counts [index] = 16; /* max possible */
- recovery_counts [index] = 16; /* max possible */
- program_drive_counts (index);
- set_prefetch_mode (index, 0);
- printk("cmd640: drive%d timings/prefetch cleared\n", index);
- } else {
- /*
- * Record timings/prefetch without changing them.
- * This preserves any prior BIOS setup.
- */
- retrieve_drive_counts (index);
- check_prefetch (index);
- printk("cmd640: drive%d timings/prefetch(%s) preserved",
- index, drive->no_io_32bit ? "off" : "on");
- display_clocks(index);
+ ide_drive_t *drive;
+
+ if (index > 1) {
+ if (cmd_hwif1 == NULL)
+ continue;
+ drive = &cmd_hwif1->drives[index & 1];
+ } else {
+ if (cmd_hwif0 == NULL)
+ continue;
+ drive = &cmd_hwif0->drives[index & 1];
}
+
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ /*
+ * Reset timing to the slowest speed and turn off prefetch.
+ * This way, the drive identify code has a better chance.
+ */
+ setup_counts [index] = 4; /* max possible */
+ active_counts [index] = 16; /* max possible */
+ recovery_counts [index] = 16; /* max possible */
+ program_drive_counts(drive, index);
+ set_prefetch_mode(drive, index, 0);
+ printk("cmd640: drive%d timings/prefetch cleared\n", index);
#else
/*
* Set the drive unmask flags to match the prefetch setting
*/
- check_prefetch (index);
+ check_prefetch(drive, index);
printk("cmd640: drive%d timings/prefetch(%s) preserved\n",
index, drive->no_io_32bit ? "off" : "on");
#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index edabe6299ef..08674711d08 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -68,8 +68,8 @@ static u8 quantize_timing(int timing, int quant)
*/
static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time)
{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- int clock_time = 1000 / system_bus_clock();
+ struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+ int clock_time = 1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock());
u8 cycle_count, active_count, recovery_count, drwtim;
static const u8 recovery_values[] =
{15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
@@ -128,7 +128,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
ide_pio_timings[pio].active_time);
setup_count = quantize_timing(ide_pio_timings[pio].setup_time,
- 1000 / system_bus_clock());
+ 1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock()));
/*
* The primary channel has individual address setup timing registers
@@ -223,7 +223,7 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
(void) pci_write_config_byte(dev, pciU, regU);
}
-static int cmd648_ide_dma_end (ide_drive_t *drive)
+static int cmd648_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long base = hwif->dma_base - (hwif->channel * 8);
@@ -239,7 +239,7 @@ static int cmd648_ide_dma_end (ide_drive_t *drive)
return err;
}
-static int cmd64x_ide_dma_end (ide_drive_t *drive)
+static int cmd64x_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -256,7 +256,7 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive)
return err;
}
-static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
+static int cmd648_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long base = hwif->dma_base - (hwif->channel * 8);
@@ -279,7 +279,7 @@ static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
return 0;
}
-static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
+static int cmd64x_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -310,7 +310,7 @@ static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
* event order for DMA transfers.
*/
-static int cmd646_1_ide_dma_end (ide_drive_t *drive)
+static int cmd646_1_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
u8 dma_stat = 0, dma_cmd = 0;
@@ -370,7 +370,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
return 0;
}
-static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
+static u8 __devinit cmd64x_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01;
@@ -385,91 +385,85 @@ static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
}
}
-static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- hwif->set_pio_mode = &cmd64x_set_pio_mode;
- hwif->set_dma_mode = &cmd64x_set_dma_mode;
-
- hwif->cable_detect = ata66_cmd64x;
+static const struct ide_port_ops cmd64x_port_ops = {
+ .set_pio_mode = cmd64x_set_pio_mode,
+ .set_dma_mode = cmd64x_set_dma_mode,
+ .cable_detect = cmd64x_cable_detect,
+};
- if (!hwif->dma_base)
- return;
+static const struct ide_dma_ops cmd64x_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = cmd64x_dma_end,
+ .dma_test_irq = cmd64x_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
- /*
- * UltraDMA only supported on PCI646U and PCI646U2, which
- * correspond to revisions 0x03, 0x05 and 0x07 respectively.
- * Actually, although the CMD tech support people won't
- * tell me the details, the 0x03 revision cannot support
- * UDMA correctly without hardware modifications, and even
- * then it only works with Quantum disks due to some
- * hold time assumptions in the 646U part which are fixed
- * in the 646U2.
- *
- * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
- */
- if (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 5)
- hwif->ultra_mask = 0x00;
+static const struct ide_dma_ops cmd646_rev1_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = cmd646_1_dma_end,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
- switch (dev->device) {
- case PCI_DEVICE_ID_CMD_648:
- case PCI_DEVICE_ID_CMD_649:
- alt_irq_bits:
- hwif->ide_dma_end = &cmd648_ide_dma_end;
- hwif->ide_dma_test_irq = &cmd648_ide_dma_test_irq;
- break;
- case PCI_DEVICE_ID_CMD_646:
- if (dev->revision == 0x01) {
- hwif->ide_dma_end = &cmd646_1_ide_dma_end;
- break;
- } else if (dev->revision >= 0x03)
- goto alt_irq_bits;
- /* fall thru */
- default:
- hwif->ide_dma_end = &cmd64x_ide_dma_end;
- hwif->ide_dma_test_irq = &cmd64x_ide_dma_test_irq;
- break;
- }
-}
+static const struct ide_dma_ops cmd648_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = cmd648_dma_end,
+ .dma_test_irq = cmd648_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
{ /* 0 */
.name = "CMD643",
.init_chipset = init_chipset_cmd64x,
- .init_hwif = init_hwif_cmd64x,
.enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
+ .port_ops = &cmd64x_port_ops,
+ .dma_ops = &cmd64x_dma_ops,
.host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
- IDE_HFLAG_ABUSE_PREFETCH |
- IDE_HFLAG_BOOTABLE,
+ IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = 0x00, /* no udma */
},{ /* 1 */
.name = "CMD646",
.init_chipset = init_chipset_cmd64x,
- .init_hwif = init_hwif_cmd64x,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.chipset = ide_cmd646,
- .host_flags = IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_BOOTABLE,
+ .port_ops = &cmd64x_port_ops,
+ .dma_ops = &cmd648_dma_ops,
+ .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
},{ /* 2 */
.name = "CMD648",
.init_chipset = init_chipset_cmd64x,
- .init_hwif = init_hwif_cmd64x,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
- .host_flags = IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_BOOTABLE,
+ .port_ops = &cmd64x_port_ops,
+ .dma_ops = &cmd648_dma_ops,
+ .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
},{ /* 3 */
.name = "CMD649",
.init_chipset = init_chipset_cmd64x,
- .init_hwif = init_hwif_cmd64x,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
- .host_flags = IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_BOOTABLE,
+ .port_ops = &cmd64x_port_ops,
+ .dma_ops = &cmd648_dma_ops,
+ .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
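[note] The hunk above is the heart of this series: the method pointers that init_hwif_cmd64x() used to poke into each hwif at probe time become shared, const operation tables referenced from ide_port_info. Callers then dispatch through the table; a hedged sketch of the assumed dispatch side (the core helper itself is not part of this patch):

	/* Sketch only: how the IDE core is assumed to invoke the new tables. */
	static void example_set_pio(ide_drive_t *drive, u8 pio)
	{
		ide_hwif_t *hwif = HWIF(drive);
		const struct ide_port_ops *ops = hwif->port_ops;

		if (ops && ops->set_pio_mode)
			ops->set_pio_mode(drive, pio);	/* e.g. cmd64x_set_pio_mode() */
	}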
@@ -483,12 +477,35 @@ static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_devic
d = cmd64x_chipsets[idx];
- /*
- * The original PCI0646 didn't have the primary channel enable bit,
- * it appeared starting with PCI0646U (i.e. revision ID 3).
- */
- if (idx == 1 && dev->revision < 3)
- d.enablebits[0].reg = 0;
+ if (idx == 1) {
+ /*
+ * UltraDMA only supported on PCI646U and PCI646U2, which
+ * correspond to revisions 0x03, 0x05 and 0x07 respectively.
+ * Actually, although the CMD tech support people won't
+ * tell me the details, the 0x03 revision cannot support
+ * UDMA correctly without hardware modifications, and even
+ * then it only works with Quantum disks due to some
+ * hold time assumptions in the 646U part which are fixed
+ * in the 646U2.
+ *
+ * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
+ */
+ if (dev->revision < 5) {
+ d.udma_mask = 0x00;
+ /*
+ * The original PCI0646 didn't have the primary
+ * channel enable bit, it appeared starting with
+ * PCI0646U (i.e. revision ID 3).
+ */
+ if (dev->revision < 3) {
+ d.enablebits[0].reg = 0;
+ if (dev->revision == 1)
+ d.dma_ops = &cmd646_rev1_dma_ops;
+ else
+ d.dma_ops = &cmd64x_dma_ops;
+ }
+ }
+ }
return ide_setup_pci_device(dev, &d);
}
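[note] The CMD646 revision handling that replaces the old init_hwif switch can be read as a decision table. The helper below is hypothetical (not part of the patch); it merely restates the checks in cmd64x_init_one():

	/* Hypothetical summary of the CMD646 revision checks above. */
	static const struct ide_dma_ops *cmd646_pick_dma_ops(u8 rev)
	{
		if (rev == 0x01)
			return &cmd646_rev1_dma_ops;	/* quirky dma_end, generic test_irq */
		if (rev < 0x03)
			return &cmd64x_dma_ops;		/* no alternate IRQ status bits */
		return &cmd648_dma_ops;			/* rev >= 3: CMD648-style IRQ bits */
	}

	/* Revisions below 5 additionally lose UDMA; below 3 they also lose the
	 * primary-channel enable bit (d.enablebits[0].reg = 0). */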
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 1c163e4ef03..17669a43443 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -103,27 +103,32 @@ static void cs5520_dma_host_set(ide_drive_t *drive, int on)
ide_dma_host_set(drive, on);
}
-static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &cs5520_set_pio_mode;
- hwif->set_dma_mode = &cs5520_set_dma_mode;
-
- if (hwif->dma_base == 0)
- return;
+static const struct ide_port_ops cs5520_port_ops = {
+ .set_pio_mode = cs5520_set_pio_mode,
+ .set_dma_mode = cs5520_set_dma_mode,
+};
- hwif->dma_host_set = &cs5520_dma_host_set;
-}
+static const struct ide_dma_ops cs5520_dma_ops = {
+ .dma_host_set = cs5520_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = __ide_dma_end,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
#define DECLARE_CS_DEV(name_str) \
{ \
.name = name_str, \
- .init_hwif = init_hwif_cs5520, \
+ .port_ops = &cs5520_port_ops, \
+ .dma_ops = &cs5520_dma_ops, \
.host_flags = IDE_HFLAG_ISA_PORTS | \
IDE_HFLAG_CS5520 | \
IDE_HFLAG_VDMA | \
IDE_HFLAG_NO_ATAPI_DMA | \
- IDE_HFLAG_ABUSE_SET_DMA_MODE |\
- IDE_HFLAG_BOOTABLE, \
+ IDE_HFLAG_ABUSE_SET_DMA_MODE, \
.pio_mask = ATA_PIO4, \
}
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
index 941a1344820..f5534c1ff34 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/pci/cs5530.c
@@ -228,29 +228,27 @@ static void __devinit init_hwif_cs5530 (ide_hwif_t *hwif)
unsigned long basereg;
u32 d0_timings;
- hwif->set_pio_mode = &cs5530_set_pio_mode;
- hwif->set_dma_mode = &cs5530_set_dma_mode;
-
basereg = CS5530_BASEREG(hwif);
d0_timings = inl(basereg + 0);
if (CS5530_BAD_PIO(d0_timings))
outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 0);
if (CS5530_BAD_PIO(inl(basereg + 8)))
outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 8);
-
- if (hwif->dma_base == 0)
- return;
-
- hwif->udma_filter = cs5530_udma_filter;
}
+static const struct ide_port_ops cs5530_port_ops = {
+ .set_pio_mode = cs5530_set_pio_mode,
+ .set_dma_mode = cs5530_set_dma_mode,
+ .udma_filter = cs5530_udma_filter,
+};
+
static const struct ide_port_info cs5530_chipset __devinitdata = {
.name = "CS5530",
.init_chipset = init_chipset_cs5530,
.init_hwif = init_hwif_cs5530,
+ .port_ops = &cs5530_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_BOOTABLE,
+ IDE_HFLAG_POST_SET_MODE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index d7b5ea992e9..99fe91a191b 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -166,27 +166,17 @@ static u8 __devinit cs5535_cable_detect(ide_hwif_t *hwif)
return (bit & 1) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
}
-/****
- * init_hwif_cs5535 - Initialize one ide cannel
- * @hwif: Channel descriptor
- *
- * This gets invoked by the IDE driver once for each channel. It
- * performs channel-specific pre-initialization before drive probing.
- *
- */
-static void __devinit init_hwif_cs5535(ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &cs5535_set_pio_mode;
- hwif->set_dma_mode = &cs5535_set_dma_mode;
-
- hwif->cable_detect = cs5535_cable_detect;
-}
+static const struct ide_port_ops cs5535_port_ops = {
+ .set_pio_mode = cs5535_set_pio_mode,
+ .set_dma_mode = cs5535_set_dma_mode,
+ .cable_detect = cs5535_cable_detect,
+};
static const struct ide_port_info cs5535_chipset __devinitdata = {
.name = "CS5535",
- .init_hwif = init_hwif_cs5535,
+ .port_ops = &cs5535_port_ops,
.host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_ABUSE_SET_DMA_MODE | IDE_HFLAG_BOOTABLE,
+ IDE_HFLAG_ABUSE_SET_DMA_MODE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index 724cbacf4e5..77cc22c2ad4 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -6,7 +6,7 @@
*
* The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards.
* Writing the driver was quite simple, since most of the job is
- * done by the generic pci-ide support.
+ * done by the generic pci-ide support.
* The hard part was finding the CY82C693's datasheet on Cypress's
* web page :-(. But Altavista solved this problem :-).
*
@@ -15,12 +15,10 @@
* - I recently got a 16.8G IBM DTTA, so I was able to test it with
* a large and fast disk - the results look great, so I'd say the
* driver is working fine :-)
- * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA
- * - this is my first linux driver, so there's probably a lot of room
+ * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA
+ * - this is my first linux driver, so there's probably a lot of room
* for optimizations and bug fixing, so feel free to do it.
- * - use idebus=xx parameter to set PCI bus speed - needed to calc
- * timings for PIO modes (default will be 40)
- * - if using PIO mode it's a good idea to set the PIO mode and
+ * - if using PIO mode it's a good idea to set the PIO mode and
* 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda
* - I had some problems with my IBM DHEA with PIO modes < 2
* (lost interrupts) ?????
@@ -110,11 +108,11 @@ typedef struct pio_clocks_s {
* calc clocks using bus_speed
* returns (rounded up) time in bus clocks for time in ns
*/
-static int calc_clk (int time, int bus_speed)
+static int calc_clk(int time, int bus_speed)
{
int clocks;
- clocks = (time*bus_speed+999)/1000 -1;
+ clocks = (time*bus_speed+999)/1000 - 1;
if (clocks < 0)
clocks = 0;
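[note] A quick worked example of calc_clk(): with time in nanoseconds and bus_speed in MHz, the expression rounds time * bus_speed / 1000 up to a whole clock count, apparently encoded as count-minus-one for the hardware registers. Values below are illustrative:

	/* 165 ns active time on a 33 MHz PCI bus:
	 *   (165 * 33 + 999) / 1000 - 1 = 6444 / 1000 - 1 = 6 - 1 = 5
	 * i.e. the register is programmed for 5 + 1 = 6 bus clocks (~180 ns). */
	int clocks = calc_clk(165, 33);		/* == 5 */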
@@ -132,11 +130,11 @@ static int calc_clk (int time, int bus_speed)
* NOTE: for mode 0,1 and 2 drives 8-bit IDE command control registers are used
* for mode 3 and 4 drives 8 and 16-bit timings are the same
*
- */
-static void compute_clocks (u8 pio, pio_clocks_t *p_pclk)
+ */
+static void compute_clocks(u8 pio, pio_clocks_t *p_pclk)
{
int clk1, clk2;
- int bus_speed = system_bus_clock(); /* get speed of PCI bus */
+ int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
/* we don't check against CY82C693's min and max speed,
* so you can play with the idebus=xx parameter
@@ -158,7 +156,7 @@ static void compute_clocks (u8 pio, pio_clocks_t *p_pclk)
clk1 = (clk1<<4)|clk2; /* combine active and recovery clocks */
/* note: we use the same values for 16bit IOR and IOW
- * those are all the same, since I don't have other
+ * those are all the same, since I don't have other
* timings than those from ide-lib.c
*/
@@ -186,7 +184,7 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
outb(index, CY82_INDEX_PORT);
data = inb(CY82_DATA_PORT);
- printk (KERN_INFO "%s (ch=%d, dev=%d): DMA mode is %d (single=%d)\n",
+ printk(KERN_INFO "%s (ch=%d, dev=%d): DMA mode is %d (single=%d)\n",
drive->name, HWIF(drive)->channel, drive->select.b.unit,
(data&0x3), ((data>>2)&1));
#endif /* CY82C693_DEBUG_LOGS */
@@ -202,7 +200,7 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
mode & 3, single);
#endif /* CY82C693_DEBUG_INFO */
- /*
+ /*
* note: below we set the value for Bus Master IDE TimeOut Register
* I'm not absolutely sure what this does, but it solved my problem
* with IDE DMA and sound, so I now can play sound and work with
@@ -216,8 +214,8 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT);
outb(data, CY82_DATA_PORT);
-#if CY82C693_DEBUG_INFO
- printk (KERN_INFO "%s: Set IDE Bus Master TimeOut Register to 0x%X\n",
+#if CY82C693_DEBUG_INFO
+ printk(KERN_INFO "%s: Set IDE Bus Master TimeOut Register to 0x%X\n",
drive->name, data);
#endif /* CY82C693_DEBUG_INFO */
}
@@ -242,14 +240,14 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
#if CY82C693_DEBUG_LOGS
/* for debug let's show the register values */
-
- if (drive->select.b.unit == 0) {
+
+ if (drive->select.b.unit == 0) {
/*
- * get master drive registers
+ * get master drive registers
* address setup control register
* is 32 bit !!!
- */
- pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
+ */
+ pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
addrCtrl &= 0x0F;
/* now let's get the remaining registers */
@@ -261,7 +259,7 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
* set slave drive registers
* address setup control register
* is 32 bit !!!
- */
+ */
pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
addrCtrl &= 0xF0;
@@ -288,9 +286,9 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
* set master drive
* address setup control register
* is 32 bit !!!
- */
+ */
pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
-
+
addrCtrl &= (~0xF);
addrCtrl |= (unsigned int)pclk.address_time;
pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
@@ -299,14 +297,14 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, pclk.time_16r);
pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, pclk.time_16w);
pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, pclk.time_8);
-
+
addrCtrl &= 0xF;
} else {
/*
* set slave drive
* address setup control register
* is 32 bit !!!
- */
+ */
pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
addrCtrl &= (~0xF0);
@@ -320,7 +318,7 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
addrCtrl >>= 4;
addrCtrl &= 0xF;
- }
+ }
#if CY82C693_DEBUG_INFO
printk(KERN_INFO "%s (ch=%d, dev=%d): set PIO timing to "
@@ -340,41 +338,41 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
#ifdef CY82C693_SETDMA_CLOCK
u8 data = 0;
-#endif /* CY82C693_SETDMA_CLOCK */
+#endif /* CY82C693_SETDMA_CLOCK */
/* write info about this version of the driver */
printk(KERN_INFO CY82_VERSION "\n");
#ifdef CY82C693_SETDMA_CLOCK
/* okay let's set the DMA clock speed */
-
- outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT);
- data = inb(CY82_DATA_PORT);
+
+ outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT);
+ data = inb(CY82_DATA_PORT);
#if CY82C693_DEBUG_INFO
printk(KERN_INFO "%s: Peripheral Configuration Register: 0x%X\n",
name, data);
#endif /* CY82C693_DEBUG_INFO */
- /*
+ /*
* for some reason sometimes the DMA controller
* speed is set to ATCLK/2 ???? - we fix this here
- *
+ *
* note: I don't know what causes this strange behaviour,
* but even changing the dma speed doesn't solve it :-(
- * the ide performance is still only half the normal speed
- *
+ * the ide performance is still only half the normal speed
+ *
* if anybody knows what goes wrong with my machine, please
* let me know - ASK
- */
+ */
data |= 0x03;
- outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT);
- outb(data, CY82_DATA_PORT);
+ outb(CY82_INDEX_CTRLREG1, CY82_INDEX_PORT);
+ outb(data, CY82_DATA_PORT);
#if CY82C693_DEBUG_INFO
- printk (KERN_INFO "%s: New Peripheral Configuration Register: 0x%X\n",
+ printk(KERN_INFO "%s: New Peripheral Configuration Register: 0x%X\n",
name, data);
#endif /* CY82C693_DEBUG_INFO */
@@ -382,15 +380,6 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
return 0;
}
-/*
- * the init function - called for each ide channel once
- */
-static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &cy82c693_set_pio_mode;
- hwif->set_dma_mode = &cy82c693_set_dma_mode;
-}
-
static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
{
static ide_hwif_t *primary;
@@ -404,14 +393,18 @@ static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
}
}
+static const struct ide_port_ops cy82c693_port_ops = {
+ .set_pio_mode = cy82c693_set_pio_mode,
+ .set_dma_mode = cy82c693_set_dma_mode,
+};
+
static const struct ide_port_info cy82c693_chipset __devinitdata = {
.name = "CY82C693",
.init_chipset = init_chipset_cy82c693,
.init_iops = init_iops_cy82c693,
- .init_hwif = init_hwif_cy82c693,
+ .port_ops = &cy82c693_port_ops,
.chipset = ide_cy82c693,
- .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_CY82C693 |
- IDE_HFLAG_BOOTABLE,
+ .host_flags = IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.swdma_mask = ATA_SWDMA2,
.mwdma_mask = ATA_MWDMA2,
@@ -424,7 +417,7 @@ static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_dev
/* The CY82C693 is more than just an IDE controller.
Function 1 is the primary IDE channel, function 2 the secondary. */
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
+ if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
PCI_FUNC(dev->devfn) == 1) {
dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
ret = ide_setup_pci_devices(dev, dev2, &cy82c693_chipset);
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index 961698d655e..b9e457996d0 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -43,6 +43,10 @@ static const u8 setup[] = {
0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13,
};
+static const struct ide_port_ops delkin_cb_port_ops = {
+ .quirkproc = ide_undecoded_slave,
+};
+
static int __devinit
delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
{
@@ -71,26 +75,21 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
if (setup[i])
outb(setup[i], base + i);
}
- pci_release_regions(dev); /* IDE layer handles regions itself */
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
hw.irq = dev->irq;
hw.chipset = ide_pci; /* this enables IRQ sharing */
- hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ hwif = ide_find_port();
if (hwif == NULL)
goto out_disable;
i = hwif->index;
- if (hwif->present)
- ide_unregister(i);
- else
- ide_init_port_data(hwif, i);
-
+ ide_init_port_data(hwif, i);
ide_init_port_hw(hwif, &hw);
- hwif->quirkproc = &ide_undecoded_slave;
+ hwif->port_ops = &delkin_cb_port_ops;
idx[0] = i;
@@ -110,6 +109,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
out_disable:
printk(KERN_ERR "delkin_cb: no IDE devices found\n");
+ pci_release_regions(dev);
pci_disable_device(dev);
return -ENODEV;
}
@@ -119,9 +119,9 @@ delkin_cb_remove (struct pci_dev *dev)
{
ide_hwif_t *hwif = pci_get_drvdata(dev);
- if (hwif)
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
+ pci_release_regions(dev);
pci_disable_device(dev);
}
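[note] The delkin_cb changes above fix PCI region ownership: probe used to release the regions immediately ("IDE layer handles regions itself"); now the driver holds them for the life of the device, releasing them on the probe error path and in remove(). A hedged skeleton of the resulting pairing (assumed flow, details elided):

	static int example_probe(struct pci_dev *dev)
	{
		int rc = pci_enable_device(dev);

		if (rc)
			return rc;
		rc = pci_request_regions(dev, "delkin_cb");
		if (rc)
			goto out_disable;
		/* ... find and register the IDE port; on failure:
		 *     pci_release_regions(dev); goto out_disable; */
		return 0;
	out_disable:
		pci_disable_device(dev);
		return rc;
	}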
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index 7fd83a9d4de..041720e2276 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -38,8 +38,7 @@ MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE st
{ \
.name = name_str, \
.host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \
- extra_flags | \
- IDE_HFLAG_BOOTABLE, \
+ extra_flags, \
.swdma_mask = ATA_SWDMA2, \
.mwdma_mask = ATA_MWDMA2, \
.udma_mask = ATA_UDMA6, \
@@ -50,9 +49,8 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
{ /* 1 */
.name = "NS87410",
- .enablebits = {{0x43,0x08,0x08}, {0x47,0x08,0x08}},
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
- IDE_HFLAG_BOOTABLE,
+ .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} },
+ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
.swdma_mask = ATA_SWDMA2,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
@@ -99,7 +97,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
* Called when the PCI registration layer (or the IDE initialization)
* finds a device matching our IDE device tables.
*/
-
+
static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct ide_port_info *d = &generic_chipsets[id->driver_data];
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index 9f01da46b01..84c36c11719 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -115,11 +115,10 @@ static unsigned int __devinit init_chipset_hpt34x(struct pci_dev *dev, const cha
return dev->irq;
}
-static void __devinit init_hwif_hpt34x(ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &hpt34x_set_pio_mode;
- hwif->set_dma_mode = &hpt34x_set_mode;
-}
+static const struct ide_port_ops hpt34x_port_ops = {
+ .set_pio_mode = hpt34x_set_pio_mode,
+ .set_dma_mode = hpt34x_set_mode,
+};
#define IDE_HFLAGS_HPT34X \
(IDE_HFLAG_NO_ATAPI_DMA | \
@@ -131,16 +130,14 @@ static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
{ /* 0 */
.name = "HPT343",
.init_chipset = init_chipset_hpt34x,
- .init_hwif = init_hwif_hpt34x,
- .extra = 16,
- .host_flags = IDE_HFLAGS_HPT34X,
+ .port_ops = &hpt34x_port_ops,
+ .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_NON_BOOTABLE,
.pio_mask = ATA_PIO5,
},
{ /* 1 */
.name = "HPT345",
.init_chipset = init_chipset_hpt34x,
- .init_hwif = init_hwif_hpt34x,
- .extra = 16,
+ .port_ops = &hpt34x_port_ops,
.host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO5,
#ifdef CONFIG_HPT34X_AUTODMA
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 82d0e318a1f..c929dadaaaf 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -760,7 +760,7 @@ static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
}
} else
outb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
- hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->io_ports.ctl_addr);
}
/*
@@ -776,7 +776,7 @@ static void hpt366_dma_lost_irq(ide_drive_t *drive)
pci_read_config_byte(dev, 0x52, &mcr3);
pci_read_config_byte(dev, 0x5a, &scr1);
printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n",
- drive->name, __FUNCTION__, mcr1, mcr3, scr1);
+ drive->name, __func__, mcr1, mcr3, scr1);
if (scr1 & 0x10)
pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
ide_dma_lost_irq(drive);
@@ -808,7 +808,7 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
hpt370_clear_engine(drive);
}
-static void hpt370_ide_dma_start(ide_drive_t *drive)
+static void hpt370_dma_start(ide_drive_t *drive)
{
#ifdef HPT_RESET_STATE_ENGINE
hpt370_clear_engine(drive);
@@ -816,7 +816,7 @@ static void hpt370_ide_dma_start(ide_drive_t *drive)
ide_dma_start(drive);
}
-static int hpt370_ide_dma_end(ide_drive_t *drive)
+static int hpt370_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
u8 dma_stat = inb(hwif->dma_status);
@@ -838,7 +838,7 @@ static void hpt370_dma_timeout(ide_drive_t *drive)
}
/* returns 1 if DMA IRQ issued, 0 otherwise */
-static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
+static int hpt374_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -858,11 +858,11 @@ static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
if (!drive->waiting_for_dma)
printk(KERN_WARNING "%s: (%s) called while not waiting\n",
- drive->name, __FUNCTION__);
+ drive->name, __func__);
return 0;
}
-static int hpt374_ide_dma_end(ide_drive_t *drive)
+static int hpt374_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -1271,17 +1271,6 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
/* Cache the channel's MISC. control registers' offset */
hwif->select_data = hwif->channel ? 0x54 : 0x50;
- hwif->set_pio_mode = &hpt3xx_set_pio_mode;
- hwif->set_dma_mode = &hpt3xx_set_mode;
-
- hwif->quirkproc = &hpt3xx_quirkproc;
- hwif->maskproc = &hpt3xx_maskproc;
-
- hwif->udma_filter = &hpt3xx_udma_filter;
- hwif->mdma_filter = &hpt3xx_mdma_filter;
-
- hwif->cable_detect = hpt3xx_cable_detect;
-
/*
* HPT3xxN chips have some complications:
*
@@ -1323,29 +1312,19 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
if (new_mcr != old_mcr)
pci_write_config_byte(dev, hwif->select_data + 1, new_mcr);
-
- if (hwif->dma_base == 0)
- return;
-
- if (chip_type >= HPT374) {
- hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
- hwif->ide_dma_end = &hpt374_ide_dma_end;
- } else if (chip_type >= HPT370) {
- hwif->dma_start = &hpt370_ide_dma_start;
- hwif->ide_dma_end = &hpt370_ide_dma_end;
- hwif->dma_timeout = &hpt370_dma_timeout;
- } else
- hwif->dma_lost_irq = &hpt366_dma_lost_irq;
}
-static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
+static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
+ const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 masterdma = 0, slavedma = 0;
- u8 dma_new = 0, dma_old = 0;
- unsigned long flags;
+ unsigned long flags, base = ide_pci_dma_base(hwif, d);
+ u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
- dma_old = inb(dmabase + 2);
+ if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+ return -1;
+
+ dma_old = inb(base + 2);
local_irq_save(flags);
@@ -1356,11 +1335,21 @@ static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
if (masterdma & 0x30) dma_new |= 0x20;
if ( slavedma & 0x30) dma_new |= 0x40;
if (dma_new != dma_old)
- outb(dma_new, dmabase + 2);
+ outb(dma_new, base + 2);
local_irq_restore(flags);
- ide_setup_dma(hwif, dmabase);
+ printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
+ hwif->name, base, base + 7);
+
+ hwif->extra_base = base + (hwif->channel ? 8 : 16);
+
+ if (ide_allocate_dma_engine(hwif))
+ return -1;
+
+ ide_setup_dma(hwif, base);
+
+ return 0;
}
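[note] init_dma_hpt366() now follows the new ->init_dma() contract used elsewhere in this series: it derives the DMA base itself and returns 0 on success or -1 to leave the port PIO-only, instead of receiving a pre-validated dmabase. A condensed sketch of the assumed contract, using only helpers visible in this diff:

	/* Assumed shape of a minimal ->init_dma() hook under the new contract. */
	static int example_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
	{
		unsigned long base = ide_pci_dma_base(hwif, d);

		if (base == 0 || ide_pci_set_master(to_pci_dev(hwif->dev), d->name) < 0)
			return -1;			/* port stays PIO-only */

		if (ide_allocate_dma_engine(hwif))
			return -1;

		ide_setup_dma(hwif, base);
		return 0;
	}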
static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
@@ -1416,6 +1405,49 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
IDE_HFLAG_ABUSE_SET_DMA_MODE | \
IDE_HFLAG_OFF_BOARD)
+static const struct ide_port_ops hpt3xx_port_ops = {
+ .set_pio_mode = hpt3xx_set_pio_mode,
+ .set_dma_mode = hpt3xx_set_mode,
+ .quirkproc = hpt3xx_quirkproc,
+ .maskproc = hpt3xx_maskproc,
+ .mdma_filter = hpt3xx_mdma_filter,
+ .udma_filter = hpt3xx_udma_filter,
+ .cable_detect = hpt3xx_cable_detect,
+};
+
+static const struct ide_dma_ops hpt37x_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = hpt374_dma_end,
+ .dma_test_irq = hpt374_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
+
+static const struct ide_dma_ops hpt370_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = hpt370_dma_start,
+ .dma_end = hpt370_dma_end,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = hpt370_dma_timeout,
+};
+
+static const struct ide_dma_ops hpt36x_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = __ide_dma_end,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = hpt366_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
+
static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
{ /* 0 */
.name = "HPT36x",
@@ -1429,7 +1461,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
* Bit 4 is for the primary channel, bit 5 for the secondary.
*/
.enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}},
- .extra = 240,
+ .port_ops = &hpt3xx_port_ops,
+ .dma_ops = &hpt36x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -1439,7 +1472,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- .extra = 240,
+ .port_ops = &hpt3xx_port_ops,
+ .dma_ops = &hpt37x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -1449,7 +1483,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- .extra = 240,
+ .port_ops = &hpt3xx_port_ops,
+ .dma_ops = &hpt37x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -1459,7 +1494,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- .extra = 240,
+ .port_ops = &hpt3xx_port_ops,
+ .dma_ops = &hpt37x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -1470,7 +1506,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
.init_dma = init_dma_hpt366,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.udma_mask = ATA_UDMA5,
- .extra = 240,
+ .port_ops = &hpt3xx_port_ops,
+ .dma_ops = &hpt37x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -1480,7 +1517,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
- .extra = 240,
+ .port_ops = &hpt3xx_port_ops,
+ .dma_ops = &hpt37x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -1543,6 +1581,10 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
d.name = info->chip_name;
d.udma_mask = info->udma_mask;
+ /* fixup ->dma_ops for HPT370/HPT370A */
+ if (info == &hpt370 || info == &hpt370a)
+ d.dma_ops = &hpt370_dma_ops;
+
pci_set_drvdata(dev, (void *)info);
if (info == &hpt36x || info == &hpt374)
@@ -1557,7 +1599,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
hpt374_init(dev, dev2);
else {
if (hpt36x_init(dev, dev2))
- d.host_flags |= IDE_HFLAG_BOOTABLE;
+ d.host_flags &= ~IDE_HFLAG_NON_BOOTABLE;
}
ret = ide_setup_pci_devices(dev, dev2, &d);
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index e3427eaab43..9053c8771e6 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -35,7 +35,7 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
static DEFINE_SPINLOCK(tune_lock);
int control = 0;
- static const u8 timings[][2]= {
+ static const u8 timings[][2] = {
{ 0, 0 },
{ 0, 0 },
{ 1, 0 },
@@ -105,11 +105,10 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
if (!(reg48 & u_flag))
pci_write_config_byte(dev, 0x48, reg48 | u_flag);
- if (speed >= XFER_UDMA_5) {
+ if (speed >= XFER_UDMA_5)
pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
- } else {
+ else
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
- }
if ((reg4a & a_speed) != u_speed)
pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
@@ -150,29 +149,18 @@ static u8 __devinit it8213_cable_detect(ide_hwif_t *hwif)
return (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
-/**
- * init_hwif_it8213 - set up hwif structs
- * @hwif: interface to set up
- *
- * We do the basic set up of the interface structure.
- */
-
-static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
-{
- hwif->set_dma_mode = &it8213_set_dma_mode;
- hwif->set_pio_mode = &it8213_set_pio_mode;
-
- hwif->cable_detect = it8213_cable_detect;
-}
-
+static const struct ide_port_ops it8213_port_ops = {
+ .set_pio_mode = it8213_set_pio_mode,
+ .set_dma_mode = it8213_set_dma_mode,
+ .cable_detect = it8213_cable_detect,
+};
#define DECLARE_ITE_DEV(name_str) \
{ \
.name = name_str, \
- .init_hwif = init_hwif_it8213, \
- .enablebits = {{0x41,0x80,0x80}}, \
- .host_flags = IDE_HFLAG_SINGLE | \
- IDE_HFLAG_BOOTABLE, \
+ .enablebits = { {0x41, 0x80, 0x80} }, \
+ .port_ops = &it8213_port_ops, \
+ .host_flags = IDE_HFLAG_SINGLE, \
.pio_mask = ATA_PIO4, \
.swdma_mask = ATA_SWDMA2_ONLY, \
.mwdma_mask = ATA_MWDMA12_ONLY, \
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index d8a167451fd..6ab04115286 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -418,7 +418,7 @@ static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
}
/**
- * ata66_it821x - check for 80 pin cable
+ * it821x_cable_detect - cable detection
* @hwif: interface to check
*
* Check for the presence of an ATA66 capable cable on the
@@ -426,7 +426,7 @@ static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
* the needed logic onboard.
*/
-static u8 __devinit ata66_it821x(ide_hwif_t *hwif)
+static u8 __devinit it821x_cable_detect(ide_hwif_t *hwif)
{
/* The reference driver also only does disk side */
return ATA_CBL_PATA80;
@@ -511,6 +511,11 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
}
+static struct ide_dma_ops it821x_pass_through_dma_ops = {
+ .dma_start = it821x_dma_start,
+ .dma_end = it821x_dma_end,
+};
+
/**
* init_hwif_it821x - set up hwif structs
* @hwif: interface to set up
@@ -523,16 +528,10 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct it821x_dev *idev = kzalloc(sizeof(struct it821x_dev), GFP_KERNEL);
+ struct it821x_dev **itdevs = (struct it821x_dev **)pci_get_drvdata(dev);
+ struct it821x_dev *idev = itdevs[hwif->channel];
u8 conf;
- hwif->quirkproc = &it821x_quirkproc;
-
- if (idev == NULL) {
- printk(KERN_ERR "it821x: out of memory, falling back to legacy behaviour.\n");
- return;
- }
-
ide_set_hwifdata(hwif, idev);
pci_read_config_byte(dev, 0x50, &conf);
@@ -567,17 +566,11 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
}
if (idev->smart == 0) {
- hwif->set_pio_mode = &it821x_set_pio_mode;
- hwif->set_dma_mode = &it821x_set_dma_mode;
-
/* MWDMA/PIO clock switching for pass through mode */
- hwif->dma_start = &it821x_dma_start;
- hwif->ide_dma_end = &it821x_dma_end;
+ hwif->dma_ops = &it821x_pass_through_dma_ops;
} else
hwif->host_flags |= IDE_HFLAG_NO_SET_MODE;
- hwif->cable_detect = ata66_it821x;
-
if (hwif->dma_base == 0)
return;
@@ -617,13 +610,20 @@ static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const cha
return 0;
}
+static const struct ide_port_ops it821x_port_ops = {
+ /* it821x_set_{pio,dma}_mode() are only used in pass-through mode */
+ .set_pio_mode = it821x_set_pio_mode,
+ .set_dma_mode = it821x_set_dma_mode,
+ .quirkproc = it821x_quirkproc,
+ .cable_detect = it821x_cable_detect,
+};
#define DECLARE_ITE_DEV(name_str) \
{ \
.name = name_str, \
.init_chipset = init_chipset_it821x, \
.init_hwif = init_hwif_it821x, \
- .host_flags = IDE_HFLAG_BOOTABLE, \
+ .port_ops = &it821x_port_ops, \
.pio_mask = ATA_PIO4, \
}
@@ -642,6 +642,22 @@ static const struct ide_port_info it821x_chipsets[] __devinitdata = {
static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
+ struct it821x_dev *itdevs[2] = { NULL, NULL }, *itdev;
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ itdev = kzalloc(sizeof(*itdev), GFP_KERNEL);
+ if (itdev == NULL) {
+ kfree(itdevs[0]);
+ printk(KERN_ERR "it821x: out of memory\n");
+ return -ENOMEM;
+ }
+
+ itdevs[i] = itdev;
+ }
+
+ pci_set_drvdata(dev, itdevs);
+
return ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]);
}
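[note] The it821x private-data allocation moves from init_hwif() (which could not fail cleanly, only fall back to legacy behaviour) into it821x_init_one(), which can return -ENOMEM. The handoff appears to rely on ->init_hwif() running inside ide_setup_pci_device(), so the on-stack itdevs[] array stored via pci_set_drvdata() is still live when read back. A sketch of the consumer side under that assumption:

	/* Sketch: assumes ->init_hwif() runs before it821x_init_one() returns,
	 * while itdevs[] is still on the caller's stack. */
	static void __devinit example_init_hwif(ide_hwif_t *hwif)
	{
		struct pci_dev *dev = to_pci_dev(hwif->dev);
		struct it821x_dev **itdevs = (struct it821x_dev **)pci_get_drvdata(dev);

		ide_set_hwifdata(hwif, itdevs[hwif->channel]);
	}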
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index a56bcb4f22f..96ef7394f28 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -19,13 +19,13 @@ typedef enum {
} port_type;
/**
- * ata66_jmicron - Cable check
+ * jmicron_cable_detect - cable detection
* @hwif: IDE port
*
* Returns the cable type.
*/
-static u8 __devinit ata66_jmicron(ide_hwif_t *hwif)
+static u8 __devinit jmicron_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *pdev = to_pci_dev(hwif->dev);
@@ -63,8 +63,7 @@ static u8 __devinit ata66_jmicron(ide_hwif_t *hwif)
* actually do our cable checking etc. Thankfully we don't need
* to do the plumbing for other cases.
*/
- switch (port_map[port])
- {
+ switch (port_map[port]) {
case PORT_PATA0:
if (control & (1 << 3)) /* 40/80 pin primary */
return ATA_CBL_PATA40;
@@ -96,26 +95,16 @@ static void jmicron_set_dma_mode(ide_drive_t *drive, const u8 mode)
{
}
-/**
- * init_hwif_jmicron - set up hwif structs
- * @hwif: interface to set up
- *
- * Minimal set up is required for the Jmicron hardware.
- */
-
-static void __devinit init_hwif_jmicron(ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &jmicron_set_pio_mode;
- hwif->set_dma_mode = &jmicron_set_dma_mode;
-
- hwif->cable_detect = ata66_jmicron;
-}
+static const struct ide_port_ops jmicron_port_ops = {
+ .set_pio_mode = jmicron_set_pio_mode,
+ .set_dma_mode = jmicron_set_dma_mode,
+ .cable_detect = jmicron_cable_detect,
+};
static const struct ide_port_info jmicron_chipset __devinitdata = {
.name = "JMB",
- .init_hwif = init_hwif_jmicron,
- .host_flags = IDE_HFLAG_BOOTABLE,
.enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
+ .port_ops = &jmicron_port_ops,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index 75513320aad..c13e299077e 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -72,8 +72,8 @@ static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
base = pci_resource_start(pdev, port * 2) & ~3;
dmabase = pci_resource_start(pdev, 4) & ~3;
- superio_ide_status[port] = base + IDE_STATUS_OFFSET;
- superio_ide_select[port] = base + IDE_SELECT_OFFSET;
+ superio_ide_status[port] = base + 7;
+ superio_ide_select[port] = base + 6;
superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa);
/* Clear error/interrupt, enable dma */
@@ -150,7 +150,7 @@ static void ns87415_selectproc (ide_drive_t *drive)
ns87415_prepare_drive (drive, drive->using_dma);
}
-static int ns87415_ide_dma_end (ide_drive_t *drive)
+static int ns87415_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
u8 dma_stat = 0, dma_cmd = 0;
@@ -170,7 +170,7 @@ static int ns87415_ide_dma_end (ide_drive_t *drive)
return (dma_stat & 7) != 4;
}
-static int ns87415_ide_dma_setup(ide_drive_t *drive)
+static int ns87415_dma_setup(ide_drive_t *drive)
{
/* select DMA xfer */
ns87415_prepare_drive(drive, 1);
@@ -195,8 +195,6 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
u8 stat;
#endif
- hwif->selectproc = &ns87415_selectproc;
-
/*
* We cannot probe for IRQ: both ports share common IRQ on INTA.
* Also, leave IRQ masked during drive probing, to prevent infinite
@@ -233,12 +231,12 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
* SELECT_DRIVE() properly during first ide_probe_port().
*/
timeout = 10000;
- outb(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ outb(12, hwif->io_ports.ctl_addr);
udelay(10);
- outb(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ outb(8, hwif->io_ports.ctl_addr);
do {
udelay(50);
- stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = hwif->INB(hwif->io_ports.status_addr);
if (stat == 0xff)
break;
} while ((stat & BUSY_STAT) && --timeout);
@@ -246,7 +244,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
}
if (!using_inta)
- hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]);
+ hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
else if (!hwif->irq && hwif->mate && hwif->mate->irq)
hwif->irq = hwif->mate->irq; /* share IRQ with mate */
@@ -254,19 +252,33 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
return;
outb(0x60, hwif->dma_status);
- hwif->dma_setup = &ns87415_ide_dma_setup;
- hwif->ide_dma_end = &ns87415_ide_dma_end;
}
+static const struct ide_port_ops ns87415_port_ops = {
+ .selectproc = ns87415_selectproc,
+};
+
+static const struct ide_dma_ops ns87415_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ns87415_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = ns87415_dma_end,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
+
static const struct ide_port_info ns87415_chipset __devinitdata = {
.name = "NS87415",
#ifdef CONFIG_SUPERIO
.init_iops = init_iops_ns87415,
#endif
.init_hwif = init_hwif_ns87415,
+ .port_ops = &ns87415_port_ops,
+ .dma_ops = &ns87415_dma_ops,
.host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
- IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_BOOTABLE,
+ IDE_HFLAG_NO_ATAPI_DMA,
};
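[note] Scattered through these drivers is a second conversion: the old hwif->io_ports[IDE_*_OFFSET] array gives way to named fields (io_ports.data_addr, io_ports.status_addr, io_ports.ctl_addr, ...), which is why the superio code above can use the raw task-file offsets 7 (status) and 6 (select). A hedged, partial sketch of the assumed layout:

	/* Assumed shape of the new io_ports layout (partial, illustrative). */
	struct example_io_ports {
		unsigned long data_addr;	/* base + 0, was io_ports[IDE_DATA_OFFSET]   */
		unsigned long status_addr;	/* base + 7, was io_ports[IDE_STATUS_OFFSET] */
		unsigned long ctl_addr;		/* alt-status / device control register      */
	};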
static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/pci/opti621.c
index 46e8748f507..6e99080497b 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/pci/opti621.c
@@ -53,13 +53,12 @@
* If you then set the second drive to another PIO, the old value
* (automatically selected) will be overridden by yours.
* There is a 25/33MHz switch in configuration
- * register, but driver is written for use at any frequency which get
- * (use idebus=xx to select PCI bus speed).
+ * register, but driver is written for use at any frequency.
*
* Version 0.1, Nov 8, 1996
- * by Jaromir Koutek, for 2.1.8.
+ * by Jaromir Koutek, for 2.1.8.
* Initial version of driver.
- *
+ *
* Version 0.2
* Number 0.2 skipped.
*
@@ -75,7 +74,7 @@
* by Jaromir Koutek
* Updates for use with (again) new IDE block driver.
* Update of documentation.
- *
+ *
* Version 0.6, Jan 2, 1999
* by Jaromir Koutek
* Reversed to version 0.3 of the driver, because
@@ -208,29 +207,34 @@ typedef struct pio_clocks_s {
static void compute_clocks(int pio, pio_clocks_t *clks)
{
- if (pio != PIO_NOT_EXIST) {
- int adr_setup, data_pls;
- int bus_speed = system_bus_clock();
-
- adr_setup = ide_pio_timings[pio].setup_time;
- data_pls = ide_pio_timings[pio].active_time;
- clks->address_time = cmpt_clk(adr_setup, bus_speed);
- clks->data_time = cmpt_clk(data_pls, bus_speed);
- clks->recovery_time = cmpt_clk(ide_pio_timings[pio].cycle_time
- - adr_setup-data_pls, bus_speed);
- if (clks->address_time<1) clks->address_time = 1;
- if (clks->address_time>4) clks->address_time = 4;
- if (clks->data_time<1) clks->data_time = 1;
- if (clks->data_time>16) clks->data_time = 16;
- if (clks->recovery_time<2) clks->recovery_time = 2;
- if (clks->recovery_time>17) clks->recovery_time = 17;
+ if (pio != PIO_NOT_EXIST) {
+ int adr_setup, data_pls;
+ int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
+
+ adr_setup = ide_pio_timings[pio].setup_time;
+ data_pls = ide_pio_timings[pio].active_time;
+ clks->address_time = cmpt_clk(adr_setup, bus_speed);
+ clks->data_time = cmpt_clk(data_pls, bus_speed);
+ clks->recovery_time = cmpt_clk(ide_pio_timings[pio].cycle_time
+ - adr_setup-data_pls, bus_speed);
+ if (clks->address_time < 1)
+ clks->address_time = 1;
+ if (clks->address_time > 4)
+ clks->address_time = 4;
+ if (clks->data_time < 1)
+ clks->data_time = 1;
+ if (clks->data_time > 16)
+ clks->data_time = 16;
+ if (clks->recovery_time < 2)
+ clks->recovery_time = 2;
+ if (clks->recovery_time > 17)
+ clks->recovery_time = 17;
} else {
clks->address_time = 1;
clks->data_time = 1;
clks->recovery_time = 2;
/* minimal values */
}
-
}
static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
@@ -247,8 +251,8 @@ static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
/* sets drive->drive_data for both drives */
compute_pios(drive, pio);
- pio1 = hwif->drives[0].drive_data;
- pio2 = hwif->drives[1].drive_data;
+ pio1 = hwif->drives[0].drive_data;
+ pio2 = hwif->drives[1].drive_data;
compute_clocks(pio1, &first);
compute_clocks(pio2, &second);
@@ -275,7 +279,7 @@ static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
spin_lock_irqsave(&opti621_lock, flags);
- reg_base = hwif->io_ports[IDE_DATA_OFFSET];
+ reg_base = hwif->io_ports.data_addr;
/* allow Register-B */
outb(0xc0, reg_base + CNTRL_REG);
@@ -321,31 +325,25 @@ static void __devinit opti621_port_init_devs(ide_hwif_t *hwif)
hwif->drives[1].drive_data = PIO_DONT_KNOW;
}
-/*
- * init_hwif_opti621() is called once for each hwif found at boot.
- */
-static void __devinit init_hwif_opti621 (ide_hwif_t *hwif)
-{
- hwif->port_init_devs = opti621_port_init_devs;
- hwif->set_pio_mode = &opti621_set_pio_mode;
-}
+static const struct ide_port_ops opti621_port_ops = {
+ .port_init_devs = opti621_port_init_devs,
+ .set_pio_mode = opti621_set_pio_mode,
+};
static const struct ide_port_info opti621_chipsets[] __devinitdata = {
{ /* 0 */
.name = "OPTI621",
- .init_hwif = init_hwif_opti621,
- .enablebits = {{0x45,0x80,0x00}, {0x40,0x08,0x00}},
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
- IDE_HFLAG_BOOTABLE,
+ .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
+ .port_ops = &opti621_port_ops,
+ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
.pio_mask = ATA_PIO3,
.swdma_mask = ATA_SWDMA2,
.mwdma_mask = ATA_MWDMA2,
- },{ /* 1 */
+ }, { /* 1 */
.name = "OPTI621X",
- .init_hwif = init_hwif_opti621,
- .enablebits = {{0x45,0x80,0x00}, {0x40,0x08,0x00}},
- .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
- IDE_HFLAG_BOOTABLE,
+ .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
+ .port_ops = &opti621_port_ops,
+ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
.pio_mask = ATA_PIO3,
.swdma_mask = ATA_SWDMA2,
.mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 1c8cb7797a4..ec9bd7b352f 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -34,7 +34,7 @@
#undef DEBUG
#ifdef DEBUG
-#define DBG(fmt, args...) printk("%s: " fmt, __FUNCTION__, ## args)
+#define DBG(fmt, args...) printk("%s: " fmt, __func__, ## args)
#else
#define DBG(fmt, args...)
#endif
@@ -442,17 +442,6 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
return dev->irq;
}
-static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &pdcnew_set_pio_mode;
- hwif->set_dma_mode = &pdcnew_set_dma_mode;
-
- hwif->quirkproc = &pdcnew_quirkproc;
- hwif->resetproc = &pdcnew_reset;
-
- hwif->cable_detect = pdcnew_cable_detect;
-}
-
static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
{
struct pci_dev *dev2;
@@ -476,11 +465,19 @@ static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
return NULL;
}
+static const struct ide_port_ops pdcnew_port_ops = {
+ .set_pio_mode = pdcnew_set_pio_mode,
+ .set_dma_mode = pdcnew_set_dma_mode,
+ .quirkproc = pdcnew_quirkproc,
+ .resetproc = pdcnew_reset,
+ .cable_detect = pdcnew_cable_detect,
+};
+
#define DECLARE_PDCNEW_DEV(name_str, udma) \
{ \
.name = name_str, \
.init_chipset = init_chipset_pdcnew, \
- .init_hwif = init_hwif_pdc202new, \
+ .port_ops = &pdcnew_port_ops, \
.host_flags = IDE_HFLAG_POST_SET_MODE | \
IDE_HFLAG_ERROR_STOPS_FIFO | \
IDE_HFLAG_OFF_BOARD, \
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 150422ec3cf..fca89eda5c0 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -115,7 +115,7 @@ static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
pdc202xx_set_mode(drive, XFER_PIO_0 + pio);
}
-static u8 __devinit pdc2026x_old_cable_detect(ide_hwif_t *hwif)
+static u8 __devinit pdc2026x_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10);
@@ -163,7 +163,7 @@ static void pdc202xx_quirkproc(ide_drive_t *drive)
drive->quirk_list = 0;
}
-static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
+static void pdc202xx_dma_start(ide_drive_t *drive)
{
if (drive->current_speed > XFER_UDMA_2)
pdc_old_enable_66MHz_clock(drive->hwif);
@@ -185,7 +185,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
ide_dma_start(drive);
}
-static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
+static int pdc202xx_dma_end(ide_drive_t *drive)
{
if (drive->media != ide_disk || drive->addressing == 1) {
ide_hwif_t *hwif = HWIF(drive);
@@ -202,7 +202,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
return __ide_dma_end(drive);
}
-static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive)
+static int pdc202xx_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long high_16 = hwif->extra_base - 16;
@@ -226,26 +226,6 @@ somebody_else:
return (dma_stat & 4) == 4; /* return 1 if INTR asserted */
}
-static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = HWIF(drive);
-
- if (hwif->resetproc != NULL)
- hwif->resetproc(drive);
-
- ide_dma_lost_irq(drive);
-}
-
-static void pdc202xx_dma_timeout(ide_drive_t *drive)
-{
- ide_hwif_t *hwif = HWIF(drive);
-
- if (hwif->resetproc != NULL)
- hwif->resetproc(drive);
-
- ide_dma_timeout(drive);
-}
-
static void pdc202xx_reset_host (ide_hwif_t *hwif)
{
unsigned long high_16 = hwif->extra_base - 16;
@@ -271,68 +251,46 @@ static void pdc202xx_reset (ide_drive_t *drive)
ide_set_max_pio(drive);
}
-static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev,
- const char *name)
+static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
{
- return dev->irq;
+ pdc202xx_reset(drive);
+ ide_dma_lost_irq(drive);
}
-static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
+static void pdc202xx_dma_timeout(ide_drive_t *drive)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- hwif->set_pio_mode = &pdc202xx_set_pio_mode;
- hwif->set_dma_mode = &pdc202xx_set_mode;
-
- hwif->quirkproc = &pdc202xx_quirkproc;
-
- if (dev->device != PCI_DEVICE_ID_PROMISE_20246) {
- hwif->resetproc = &pdc202xx_reset;
-
- hwif->cable_detect = pdc2026x_old_cable_detect;
- }
-
- if (hwif->dma_base == 0)
- return;
-
- hwif->dma_lost_irq = &pdc202xx_dma_lost_irq;
- hwif->dma_timeout = &pdc202xx_dma_timeout;
-
- if (dev->device != PCI_DEVICE_ID_PROMISE_20246) {
- hwif->dma_start = &pdc202xx_old_ide_dma_start;
- hwif->ide_dma_end = &pdc202xx_old_ide_dma_end;
- }
- hwif->ide_dma_test_irq = &pdc202xx_old_ide_dma_test_irq;
+ pdc202xx_reset(drive);
+ ide_dma_timeout(drive);
}
-static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase)
+static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev,
+ const char *name)
{
+ unsigned long dmabase = pci_resource_start(dev, 4);
u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0;
- if (hwif->channel) {
- ide_setup_dma(hwif, dmabase);
- return;
- }
+ if (dmabase == 0)
+ goto out;
udma_speed_flag = inb(dmabase | 0x1f);
primary_mode = inb(dmabase | 0x1a);
secondary_mode = inb(dmabase | 0x1b);
printk(KERN_INFO "%s: (U)DMA Burst Bit %sABLED " \
"Primary %s Mode " \
- "Secondary %s Mode.\n", hwif->cds->name,
+ "Secondary %s Mode.\n", pci_name(dev),
(udma_speed_flag & 1) ? "EN" : "DIS",
(primary_mode & 1) ? "MASTER" : "PCI",
(secondary_mode & 1) ? "MASTER" : "PCI" );
if (!(udma_speed_flag & 1)) {
printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ",
- hwif->cds->name, udma_speed_flag,
+ pci_name(dev), udma_speed_flag,
(udma_speed_flag|1));
outb(udma_speed_flag | 1, dmabase | 0x1f);
printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN");
}
-
- ide_setup_dma(hwif, dmabase);
+out:
+ return dev->irq;
}
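[note] The (U)DMA burst-bit fixup now runs once per chip in init_chipset_pdc202xx() instead of once per channel in the deleted init_dma hook. The operative part is a read-modify-write of the flag byte at dmabase | 0x1f; condensed:

	unsigned long dmabase = pci_resource_start(dev, 4);	/* BM-DMA base, BAR 4 */
	u8 burst = inb(dmabase | 0x1f);

	if (!(burst & 1))
		outb(burst | 1, dmabase | 0x1f);	/* force (U)DMA burst bit on */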
static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
@@ -357,13 +315,48 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
IDE_HFLAG_ABUSE_SET_DMA_MODE | \
IDE_HFLAG_OFF_BOARD)
+static const struct ide_port_ops pdc20246_port_ops = {
+ .set_pio_mode = pdc202xx_set_pio_mode,
+ .set_dma_mode = pdc202xx_set_mode,
+ .quirkproc = pdc202xx_quirkproc,
+};
+
+static const struct ide_port_ops pdc2026x_port_ops = {
+ .set_pio_mode = pdc202xx_set_pio_mode,
+ .set_dma_mode = pdc202xx_set_mode,
+ .quirkproc = pdc202xx_quirkproc,
+ .resetproc = pdc202xx_reset,
+ .cable_detect = pdc2026x_cable_detect,
+};
+
+static const struct ide_dma_ops pdc20246_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = __ide_dma_end,
+ .dma_test_irq = pdc202xx_dma_test_irq,
+ .dma_lost_irq = pdc202xx_dma_lost_irq,
+ .dma_timeout = pdc202xx_dma_timeout,
+};
+
+static const struct ide_dma_ops pdc2026x_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = pdc202xx_dma_start,
+ .dma_end = pdc202xx_dma_end,
+ .dma_test_irq = pdc202xx_dma_test_irq,
+ .dma_lost_irq = pdc202xx_dma_lost_irq,
+ .dma_timeout = pdc202xx_dma_timeout,
+};
+
#define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \
{ \
.name = name_str, \
.init_chipset = init_chipset_pdc202xx, \
- .init_hwif = init_hwif_pdc202xx, \
- .init_dma = init_dma_pdc202xx, \
- .extra = 48, \
+ .port_ops = &pdc2026x_port_ops, \
+ .dma_ops = &pdc2026x_dma_ops, \
.host_flags = IDE_HFLAGS_PDC202XX | extra_flags, \
.pio_mask = ATA_PIO4, \
.mwdma_mask = ATA_MWDMA2, \
@@ -374,9 +367,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
{ /* 0 */
.name = "PDC20246",
.init_chipset = init_chipset_pdc202xx,
- .init_hwif = init_hwif_pdc202xx,
- .init_dma = init_dma_pdc202xx,
- .extra = 16,
+ .port_ops = &pdc20246_port_ops,
+ .dma_ops = &pdc20246_dma_ops,
.host_flags = IDE_HFLAGS_PDC202XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index decef0f4767..21c5dd23f92 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -285,11 +285,6 @@ static u8 __devinit piix_cable_detect(ide_hwif_t *hwif)
static void __devinit init_hwif_piix(ide_hwif_t *hwif)
{
- hwif->set_pio_mode = &piix_set_pio_mode;
- hwif->set_dma_mode = &piix_set_dma_mode;
-
- hwif->cable_detect = piix_cable_detect;
-
if (!hwif->dma_base)
return;
@@ -306,10 +301,16 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
hwif->ide_dma_clear_irq = &piix_dma_clear_irq;
}
+static const struct ide_port_ops piix_port_ops = {
+ .set_pio_mode = piix_set_pio_mode,
+ .set_dma_mode = piix_set_dma_mode,
+ .cable_detect = piix_cable_detect,
+};
+
#ifndef CONFIG_IA64
- #define IDE_HFLAGS_PIIX (IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE)
+ #define IDE_HFLAGS_PIIX IDE_HFLAG_LEGACY_IRQS
#else
- #define IDE_HFLAGS_PIIX IDE_HFLAG_BOOTABLE
+ #define IDE_HFLAGS_PIIX 0
#endif
#define DECLARE_PIIX_DEV(name_str, udma) \
@@ -317,6 +318,7 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
.name = name_str, \
.init_hwif = init_hwif_piix, \
.enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
+ .port_ops = &piix_port_ops, \
.host_flags = IDE_HFLAGS_PIIX, \
.pio_mask = ATA_PIO4, \
.swdma_mask = ATA_SWDMA2_ONLY, \
@@ -330,6 +332,7 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
.init_chipset = init_chipset_ich, \
.init_hwif = init_hwif_ich, \
.enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
+ .port_ops = &piix_port_ops, \
.host_flags = IDE_HFLAGS_PIIX, \
.pio_mask = ATA_PIO4, \
.swdma_mask = ATA_SWDMA2_ONLY, \
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/pci/rz1000.c
index 51676612f78..532154adba2 100644
--- a/drivers/ide/pci/rz1000.c
+++ b/drivers/ide/pci/rz1000.c
@@ -43,7 +43,7 @@ static const struct ide_port_info rz1000_chipset __devinitdata = {
.name = "RZ100x",
.init_hwif = init_hwif_rz1000,
.chipset = ide_rz1000,
- .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_BOOTABLE,
+ .host_flags = IDE_HFLAG_NO_DMA,
};
static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 561aa47c772..14c787b5d95 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -165,7 +165,7 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
*
* returns 1 on error, 0 otherwise
*/
-static int sc1200_ide_dma_end (ide_drive_t *drive)
+static int sc1200_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long dma_base = hwif->dma_base;
@@ -214,7 +214,7 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
ide_dma_off_quietly(drive);
if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma)
- hwif->dma_host_set(drive, 1);
+ hwif->dma_ops->dma_host_set(drive, 1);
return;
}
@@ -286,29 +286,30 @@ static int sc1200_resume (struct pci_dev *dev)
}
#endif
-/*
- * This gets invoked by the IDE driver once for each channel,
- * and performs channel-specific pre-initialization before drive probing.
- */
-static void __devinit init_hwif_sc1200 (ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &sc1200_set_pio_mode;
- hwif->set_dma_mode = &sc1200_set_dma_mode;
-
- if (hwif->dma_base == 0)
- return;
+static const struct ide_port_ops sc1200_port_ops = {
+ .set_pio_mode = sc1200_set_pio_mode,
+ .set_dma_mode = sc1200_set_dma_mode,
+ .udma_filter = sc1200_udma_filter,
+};
- hwif->udma_filter = sc1200_udma_filter;
- hwif->ide_dma_end = &sc1200_ide_dma_end;
-}
+static const struct ide_dma_ops sc1200_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = sc1200_dma_end,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
static const struct ide_port_info sc1200_chipset __devinitdata = {
.name = "SC1200",
- .init_hwif = init_hwif_sc1200,
+ .port_ops = &sc1200_port_ops,
+ .dma_ops = &sc1200_dma_ops,
.host_flags = IDE_HFLAG_SERIALIZE |
IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_ABUSE_DMA_MODES |
- IDE_HFLAG_BOOTABLE,
+ IDE_HFLAG_ABUSE_DMA_MODES,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
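
sc1200 additionally grows an ide_dma_ops table in which most slots point at the generic helpers (ide_dma_setup, ide_dma_start, ...) and only dma_end is chipset-specific. A toy version of that default-plus-override shape, with invented names rather than the kernel API:

#include <stdio.h>

struct dma_ops {
	int (*dma_setup)(int port);
	int (*dma_end)(int port);
};

/* Generic helpers cover the common SFF-style behaviour... */
static int generic_setup(int port) { printf("generic setup, port %d\n", port); return 0; }
static int generic_end(int port)   { printf("generic end, port %d\n", port);   return 0; }

/* ...and the chipset overrides only the slot it must. */
static int sc1200_end(int port)    { printf("sc1200 end, port %d\n", port);    return 0; }

static const struct dma_ops sc1200_dma = {
	.dma_setup = generic_setup,	/* default kept */
	.dma_end   = sc1200_end,	/* chipset-specific */
};

int main(void)
{
	const struct dma_ops *ops = &sc1200_dma;

	ops->dma_setup(0);
	ops->dma_end(0);
	return 0;
}
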
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index ef07c7a8b97..ad7cdf9060c 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -65,7 +65,7 @@
static struct scc_ports {
unsigned long ctl, dma;
- unsigned char hwif_id; /* for removing hwif from system */
+ ide_hwif_t *hwif; /* for removing port from system */
} scc_ports[MAX_HWIFS];
/* PIO transfer mode table */
@@ -317,14 +317,14 @@ static int scc_dma_setup(ide_drive_t *drive)
/**
- * scc_ide_dma_end - Stop DMA
+ * scc_dma_end - Stop DMA
* @drive: IDE drive
*
* Check and clear INT Status register.
* Then call __ide_dma_end().
*/
-static int scc_ide_dma_end(ide_drive_t * drive)
+static int scc_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long intsts_port = hwif->dma_base + 0x014;
@@ -334,7 +334,7 @@ static int scc_ide_dma_end(ide_drive_t * drive)
/* errata A308 workaround: Step5 (check data loss) */
/* We don't check non ide_disk because it is limited to UDMA4 */
- if (!(in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET])
+ if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
& ERR_STAT) &&
drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
reg = in_be32((void __iomem *)intsts_port);
@@ -438,7 +438,7 @@ static int scc_dma_test_irq(ide_drive_t *drive)
u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
/* SCC errata A252,A308 workaround: Step4 */
- if ((in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET])
+ if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
& ERR_STAT) &&
(int_stat & INTSTS_INTRQ))
return 1;
@@ -449,7 +449,7 @@ static int scc_dma_test_irq(ide_drive_t *drive)
if (!drive->waiting_for_dma)
printk(KERN_WARNING "%s: (%s) called while not waiting\n",
- drive->name, __FUNCTION__);
+ drive->name, __func__);
return 0;
}
@@ -483,7 +483,7 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
unsigned long dma_size = pci_resource_len(dev, 1);
void __iomem *ctl_addr;
void __iomem *dma_addr;
- int i;
+ int i, ret;
for (i = 0; i < MAX_HWIFS; i++) {
if (scc_ports[i].ctl == 0)
@@ -492,21 +492,17 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
if (i >= MAX_HWIFS)
return -ENOMEM;
- if (!request_mem_region(ctl_base, ctl_size, name)) {
- printk(KERN_WARNING "%s: IDE controller MMIO ports not available.\n", SCC_PATA_NAME);
- goto fail_0;
- }
-
- if (!request_mem_region(dma_base, dma_size, name)) {
- printk(KERN_WARNING "%s: IDE controller MMIO ports not available.\n", SCC_PATA_NAME);
- goto fail_1;
+ ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: can't reserve resources\n", name);
+ return ret;
}
if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
- goto fail_2;
+ goto fail_0;
if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
- goto fail_3;
+ goto fail_1;
pci_set_master(dev);
scc_ports[i].ctl = (unsigned long)ctl_addr;
@@ -515,12 +511,8 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
return 1;
- fail_3:
- iounmap(ctl_addr);
- fail_2:
- release_mem_region(dma_base, dma_size);
fail_1:
- release_mem_region(ctl_base, ctl_size);
+ iounmap(ctl_addr);
fail_0:
return -ENOMEM;
}
@@ -534,26 +526,21 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
int i;
- for (i = 0; i < MAX_HWIFS; i++) {
- hwif = &ide_hwifs[i];
- if (hwif->chipset == ide_unknown)
- break; /* pick an unused entry */
- }
- if (i == MAX_HWIFS) {
+ hwif = ide_find_port();
+ if (hwif == NULL) {
printk(KERN_ERR "%s: too many IDE interfaces, "
"no room in table\n", SCC_PATA_NAME);
return -ENOMEM;
}
memset(&hw, 0, sizeof(hw));
- for (i = IDE_DATA_OFFSET; i <= IDE_CONTROL_OFFSET; i++)
- hw.io_ports[i] = ports->dma + 0x20 + i * 4;
+ for (i = 0; i <= 8; i++)
+ hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
hw.irq = dev->irq;
hw.dev = &dev->dev;
hw.chipset = ide_pci;
ide_init_port_hw(hwif, &hw);
hwif->dev = &dev->dev;
- hwif->cds = d;
idx[0] = hwif->index;
@@ -696,7 +683,7 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
{
struct scc_ports *ports = ide_get_hwifdata(hwif);
- ports->hwif_id = hwif->index;
+ ports->hwif = hwif;
hwif->dma_command = hwif->dma_base;
hwif->dma_status = hwif->dma_base + 0x04;
@@ -705,28 +692,38 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
/* PTERADD */
out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
- hwif->dma_setup = scc_dma_setup;
- hwif->ide_dma_end = scc_ide_dma_end;
- hwif->set_pio_mode = scc_set_pio_mode;
- hwif->set_dma_mode = scc_set_dma_mode;
- hwif->ide_dma_test_irq = scc_dma_test_irq;
- hwif->udma_filter = scc_udma_filter;
-
if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
else
hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
-
- hwif->cable_detect = scc_cable_detect;
}
+static const struct ide_port_ops scc_port_ops = {
+ .set_pio_mode = scc_set_pio_mode,
+ .set_dma_mode = scc_set_dma_mode,
+ .udma_filter = scc_udma_filter,
+ .cable_detect = scc_cable_detect,
+};
+
+static const struct ide_dma_ops scc_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = scc_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = ide_dma_start,
+ .dma_end = scc_dma_end,
+ .dma_test_irq = scc_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
+
#define DECLARE_SCC_DEV(name_str) \
{ \
.name = name_str, \
.init_iops = init_iops_scc, \
.init_hwif = init_hwif_scc, \
- .host_flags = IDE_HFLAG_SINGLE | \
- IDE_HFLAG_BOOTABLE, \
+ .port_ops = &scc_port_ops, \
+ .dma_ops = &scc_dma_ops, \
+ .host_flags = IDE_HFLAG_SINGLE, \
.pio_mask = ATA_PIO4, \
}
@@ -758,11 +755,7 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i
static void __devexit scc_remove(struct pci_dev *dev)
{
struct scc_ports *ports = pci_get_drvdata(dev);
- ide_hwif_t *hwif = &ide_hwifs[ports->hwif_id];
- unsigned long ctl_base = pci_resource_start(dev, 0);
- unsigned long dma_base = pci_resource_start(dev, 1);
- unsigned long ctl_size = pci_resource_len(dev, 0);
- unsigned long dma_size = pci_resource_len(dev, 1);
+ ide_hwif_t *hwif = ports->hwif;
if (hwif->dmatable_cpu) {
pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
@@ -770,13 +763,11 @@ static void __devexit scc_remove(struct pci_dev *dev)
hwif->dmatable_cpu = NULL;
}
- ide_unregister(hwif->index);
+ ide_unregister(hwif);
- hwif->chipset = ide_unknown;
iounmap((void*)ports->dma);
iounmap((void*)ports->ctl);
- release_mem_region(dma_base, dma_size);
- release_mem_region(ctl_base, ctl_size);
+ pci_release_selected_regions(dev, (1 << 2) - 1);
memset(ports, 0, sizeof(*ports));
}
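
Worth noting in the scc_pata hunks: the two request_mem_region()/release_mem_region() pairs become single pci_request_selected_regions()/pci_release_selected_regions() calls whose second argument is a bitmask of BAR indices, so (1 << 2) - 1 selects BAR0 (ctl) and BAR1 (dma) and the fail_2/fail_3 unwind labels disappear. A tiny self-contained illustration of the mask arithmetic:

#include <stdio.h>

/* pci_request_selected_regions() takes a bitmask of BAR numbers;
 * (1 << n) - 1 sets bits 0..n-1, i.e. the first n BARs. */
static unsigned int first_n_bars(unsigned int n)
{
	return (1u << n) - 1;
}

int main(void)
{
	/* (1 << 2) - 1 == 0x3: BAR0 and BAR1, as in the hunks above */
	printf("mask for BAR0..BAR1: 0x%x\n", first_n_bars(2));
	return 0;
}
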
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index c11880b0709..a1fb20826a5 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -312,7 +312,7 @@ static u8 __devinit ata66_svwks_cobalt(ide_hwif_t *hwif)
return ATA_CBL_PATA40;
}
-static u8 __devinit ata66_svwks(ide_hwif_t *hwif)
+static u8 __devinit svwks_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -336,28 +336,28 @@ static u8 __devinit ata66_svwks(ide_hwif_t *hwif)
return ATA_CBL_PATA40;
}
-static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
-
- hwif->set_pio_mode = &svwks_set_pio_mode;
- hwif->set_dma_mode = &svwks_set_dma_mode;
- hwif->udma_filter = &svwks_udma_filter;
+static const struct ide_port_ops osb4_port_ops = {
+ .set_pio_mode = svwks_set_pio_mode,
+ .set_dma_mode = svwks_set_dma_mode,
+ .udma_filter = svwks_udma_filter,
+};
- if (dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE)
- hwif->cable_detect = ata66_svwks;
-}
+static const struct ide_port_ops svwks_port_ops = {
+ .set_pio_mode = svwks_set_pio_mode,
+ .set_dma_mode = svwks_set_dma_mode,
+ .udma_filter = svwks_udma_filter,
+ .cable_detect = svwks_cable_detect,
+};
#define IDE_HFLAGS_SVWKS \
(IDE_HFLAG_LEGACY_IRQS | \
- IDE_HFLAG_ABUSE_SET_DMA_MODE | \
- IDE_HFLAG_BOOTABLE)
+ IDE_HFLAG_ABUSE_SET_DMA_MODE)
static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
{ /* 0 */
.name = "SvrWks OSB4",
.init_chipset = init_chipset_svwks,
- .init_hwif = init_hwif_svwks,
+ .port_ops = &osb4_port_ops,
.host_flags = IDE_HFLAGS_SVWKS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -365,7 +365,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
},{ /* 1 */
.name = "SvrWks CSB5",
.init_chipset = init_chipset_svwks,
- .init_hwif = init_hwif_svwks,
+ .port_ops = &svwks_port_ops,
.host_flags = IDE_HFLAGS_SVWKS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -373,7 +373,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
},{ /* 2 */
.name = "SvrWks CSB6",
.init_chipset = init_chipset_svwks,
- .init_hwif = init_hwif_svwks,
+ .port_ops = &svwks_port_ops,
.host_flags = IDE_HFLAGS_SVWKS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -381,7 +381,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
},{ /* 3 */
.name = "SvrWks CSB6",
.init_chipset = init_chipset_svwks,
- .init_hwif = init_hwif_svwks,
+ .port_ops = &svwks_port_ops,
.host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -389,7 +389,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
},{ /* 4 */
.name = "SvrWks HT1000",
.init_chipset = init_chipset_svwks,
- .init_hwif = init_hwif_svwks,
+ .port_ops = &svwks_port_ops,
.host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -418,7 +418,7 @@ static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device
else if (idx == 2 || idx == 3) {
if ((PCI_FUNC(dev->devfn) & 1) == 0) {
if (pci_resource_start(dev, 0) != 0x01f1)
- d.host_flags &= ~IDE_HFLAG_BOOTABLE;
+ d.host_flags |= IDE_HFLAG_NON_BOOTABLE;
d.host_flags |= IDE_HFLAG_SINGLE;
} else
d.host_flags &= ~IDE_HFLAG_SINGLE;
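
The svwks_init_one hunk shows the flag polarity flip that runs through the whole series: instead of nearly every host opting in with IDE_HFLAG_BOOTABLE, the default is now bootable and only the exceptions set IDE_HFLAG_NON_BOOTABLE. A standalone sketch of why inverted polarity shrinks the descriptors (single-bit model; the helper is invented for illustration):

#include <stdio.h>

/* Inverted polarity: the zero value now means "bootable", so only
 * the rare non-bootable ports have to say anything at all. */
enum {
	HFLAG_NON_BOOTABLE = 1 << 0,	/* illustrative, single-bit model */
};

static int host_is_bootable(unsigned long host_flags)
{
	return !(host_flags & HFLAG_NON_BOOTABLE);
}

int main(void)
{
	printf("default flags : bootable=%d\n", host_is_bootable(0));
	printf("flagged       : bootable=%d\n",
	       host_is_bootable(HFLAG_NON_BOOTABLE));
	return 0;
}
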
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 9d1a3038af9..63e28f4e6d3 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -98,28 +98,28 @@ sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port,
int i;
/* Registers are word (32 bit) aligned */
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
- hw->io_ports[i] = reg + i * 4;
+ for (i = 0; i <= 7; i++)
+ hw->io_ports_array[i] = reg + i * 4;
if (ctrl_port)
- hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+ hw->io_ports.ctl_addr = ctrl_port;
if (irq_port)
- hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+ hw->io_ports.irq_addr = irq_port;
}
static void
sgiioc4_maskproc(ide_drive_t * drive, int mask)
{
writeb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
- (void __iomem *)drive->hwif->io_ports[IDE_CONTROL_OFFSET]);
+ (void __iomem *)drive->hwif->io_ports.ctl_addr);
}
static int
sgiioc4_checkirq(ide_hwif_t * hwif)
{
unsigned long intr_addr =
- hwif->io_ports[IDE_IRQ_OFFSET] + IOC4_INTR_REG * 4;
+ hwif->io_ports.irq_addr + IOC4_INTR_REG * 4;
if ((u8)readl((void __iomem *)intr_addr) & 0x03)
return 1;
@@ -134,8 +134,8 @@ sgiioc4_clearirq(ide_drive_t * drive)
{
u32 intr_reg;
ide_hwif_t *hwif = HWIF(drive);
- unsigned long other_ir =
- hwif->io_ports[IDE_IRQ_OFFSET] + (IOC4_INTR_REG << 2);
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2);
/* Code to check for PCI error conditions */
intr_reg = readl((void __iomem *)other_ir);
@@ -147,12 +147,12 @@ sgiioc4_clearirq(ide_drive_t * drive)
* a "clear" status if it got cleared. If not, then spin
* for a bit trying to clear it.
*/
- u8 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ u8 stat = sgiioc4_INB(io_ports->status_addr);
int count = 0;
- stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = sgiioc4_INB(io_ports->status_addr);
while ((stat & 0x80) && (count++ < 100)) {
udelay(1);
- stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = sgiioc4_INB(io_ports->status_addr);
}
if (intr_reg & 0x02) {
@@ -162,18 +162,18 @@ sgiioc4_clearirq(ide_drive_t * drive)
pci_stat_cmd_reg;
pci_err_addr_low =
- readl((void __iomem *)hwif->io_ports[IDE_IRQ_OFFSET]);
+ readl((void __iomem *)io_ports->irq_addr);
pci_err_addr_high =
- readl((void __iomem *)(hwif->io_ports[IDE_IRQ_OFFSET] + 4));
+ readl((void __iomem *)(io_ports->irq_addr + 4));
pci_read_config_dword(dev, PCI_COMMAND,
&pci_stat_cmd_reg);
printk(KERN_ERR
"%s(%s) : PCI Bus Error when doing DMA:"
" status-cmd reg is 0x%x\n",
- __FUNCTION__, drive->name, pci_stat_cmd_reg);
+ __func__, drive->name, pci_stat_cmd_reg);
printk(KERN_ERR
"%s(%s) : PCI Error Address is 0x%x%x\n",
- __FUNCTION__, drive->name,
+ __func__, drive->name,
pci_err_addr_high, pci_err_addr_low);
/* Clear the PCI Error indicator */
pci_write_config_dword(dev, PCI_COMMAND, 0x00000146);
@@ -188,7 +188,7 @@ sgiioc4_clearirq(ide_drive_t * drive)
return intr_reg & 3;
}
-static void sgiioc4_ide_dma_start(ide_drive_t * drive)
+static void sgiioc4_dma_start(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4;
@@ -215,8 +215,7 @@ sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base)
}
/* Stops the IOC4 DMA Engine */
-static int
-sgiioc4_ide_dma_end(ide_drive_t * drive)
+static int sgiioc4_dma_end(ide_drive_t *drive)
{
u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
ide_hwif_t *hwif = HWIF(drive);
@@ -232,7 +231,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
printk(KERN_ERR
"%s(%s): IOC4 DMA STOP bit is still 1 :"
"ioc4_dma_reg 0x%x\n",
- __FUNCTION__, drive->name, ioc4_dma);
+ __func__, drive->name, ioc4_dma);
dma_stat = 1;
}
@@ -251,7 +250,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
udelay(1);
}
if (!valid) {
- printk(KERN_ERR "%s(%s) : DMA incomplete\n", __FUNCTION__,
+ printk(KERN_ERR "%s(%s) : DMA incomplete\n", __func__,
drive->name);
dma_stat = 1;
}
@@ -264,7 +263,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
printk(KERN_ERR
"%s(%s): WARNING!! byte_count_dev %d "
"!= byte_count_mem %d\n",
- __FUNCTION__, drive->name, bc_dev, bc_mem);
+ __func__, drive->name, bc_dev, bc_mem);
}
}
@@ -279,8 +278,7 @@ static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
}
/* returns 1 if dma irq issued, 0 otherwise */
-static int
-sgiioc4_ide_dma_test_irq(ide_drive_t * drive)
+static int sgiioc4_dma_test_irq(ide_drive_t *drive)
{
return sgiioc4_checkirq(HWIF(drive));
}
@@ -294,7 +292,7 @@ static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
static void
sgiioc4_resetproc(ide_drive_t * drive)
{
- sgiioc4_ide_dma_end(drive);
+ sgiioc4_dma_end(drive);
sgiioc4_clearirq(drive);
}
@@ -329,13 +327,17 @@ sgiioc4_INB(unsigned long port)
/* Creates a dma map for the scatter-gather list entries */
static int __devinit
-ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
+ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
+ unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
void __iomem *virt_dma_base;
int num_ports = sizeof (ioc4_dma_regs_t);
void *pad;
+ if (dma_base == 0)
+ return -1;
+
printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name,
dma_base, dma_base + num_ports - 1);
@@ -343,7 +345,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
printk(KERN_ERR
"%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
"ALREADY in use\n",
- __FUNCTION__, hwif->name, (void *) dma_base,
+ __func__, hwif->name, (void *) dma_base,
(void *) dma_base + num_ports - 1);
return -1;
}
@@ -352,7 +354,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
if (virt_dma_base == NULL) {
printk(KERN_ERR
"%s(%s) -- ERROR, Unable to map addresses 0x%lx to 0x%lx\n",
- __FUNCTION__, hwif->name, dma_base, dma_base + num_ports - 1);
+ __func__, hwif->name, dma_base, dma_base + num_ports - 1);
goto dma_remap_failure;
}
hwif->dma_base = (unsigned long) virt_dma_base;
@@ -378,7 +380,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
hwif->dmatable_cpu, hwif->dmatable_dma);
printk(KERN_INFO
"%s() -- Error! Unable to allocate DMA Maps for drive %s\n",
- __FUNCTION__, hwif->name);
+ __func__, hwif->name);
printk(KERN_INFO
"Changing from DMA to PIO mode for Drive %s\n", hwif->name);
@@ -406,14 +408,14 @@ sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
printk(KERN_WARNING
"%s(%s):Warning!! DMA from previous transfer was still active\n",
- __FUNCTION__, drive->name);
+ __func__, drive->name);
writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
if (ioc4_dma & IOC4_S_DMA_STOP)
printk(KERN_ERR
"%s(%s) : IOC4 Dma STOP bit is still 1\n",
- __FUNCTION__, drive->name);
+ __func__, drive->name);
}
ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
@@ -421,14 +423,14 @@ sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
printk(KERN_WARNING
"%s(%s) : Warning!! - DMA Error during Previous"
" transfer | status 0x%x\n",
- __FUNCTION__, drive->name, ioc4_dma);
+ __func__, drive->name, ioc4_dma);
writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
if (ioc4_dma & IOC4_S_DMA_STOP)
printk(KERN_ERR
"%s(%s) : IOC4 DMA STOP bit is still 1\n",
- __FUNCTION__, drive->name);
+ __func__, drive->name);
}
/* Address of the Scatter Gather List */
@@ -519,7 +521,7 @@ use_pio_instead:
return 0; /* revert to PIO for this request */
}
-static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
+static int sgiioc4_dma_setup(ide_drive_t *drive)
{
struct request *rq = HWGROUP(drive)->rq;
unsigned int count = 0;
@@ -548,62 +550,45 @@ static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
return 0;
}
-static void __devinit
-ide_init_sgiioc4(ide_hwif_t * hwif)
-{
- hwif->mmio = 1;
- hwif->set_pio_mode = NULL; /* Sets timing for PIO mode */
- hwif->set_dma_mode = &sgiioc4_set_dma_mode;
- hwif->selectproc = NULL;/* Use the default routine to select drive */
- hwif->reset_poll = NULL;/* No HBA specific reset_poll needed */
- hwif->pre_reset = NULL; /* No HBA specific pre_set needed */
- hwif->resetproc = &sgiioc4_resetproc;/* Reset DMA engine,
- clear interrupts */
- hwif->maskproc = &sgiioc4_maskproc; /* Mask on/off NIEN register */
- hwif->quirkproc = NULL;
-
- hwif->INB = &sgiioc4_INB;
-
- if (hwif->dma_base == 0)
- return;
+static const struct ide_port_ops sgiioc4_port_ops = {
+ .set_dma_mode = sgiioc4_set_dma_mode,
+ /* reset DMA engine, clear IRQs */
+ .resetproc = sgiioc4_resetproc,
+ /* mask on/off NIEN register */
+ .maskproc = sgiioc4_maskproc,
+};
- hwif->dma_host_set = &sgiioc4_dma_host_set;
- hwif->dma_setup = &sgiioc4_ide_dma_setup;
- hwif->dma_start = &sgiioc4_ide_dma_start;
- hwif->ide_dma_end = &sgiioc4_ide_dma_end;
- hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
- hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
- hwif->dma_timeout = &ide_dma_timeout;
-}
+static const struct ide_dma_ops sgiioc4_dma_ops = {
+ .dma_host_set = sgiioc4_dma_host_set,
+ .dma_setup = sgiioc4_dma_setup,
+ .dma_start = sgiioc4_dma_start,
+ .dma_end = sgiioc4_dma_end,
+ .dma_test_irq = sgiioc4_dma_test_irq,
+ .dma_lost_irq = sgiioc4_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
static const struct ide_port_info sgiioc4_port_info __devinitdata = {
.chipset = ide_pci,
- .host_flags = IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
- IDE_HFLAG_NO_AUTOTUNE,
+ .init_dma = ide_dma_sgiioc4,
+ .port_ops = &sgiioc4_port_ops,
+ .dma_ops = &sgiioc4_dma_ops,
.mwdma_mask = ATA_MWDMA2_ONLY,
};
static int __devinit
sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
{
- unsigned long cmd_base, dma_base, irqport;
+ unsigned long cmd_base, irqport;
unsigned long bar0, cmd_phys_base, ctl;
void __iomem *virt_base;
ide_hwif_t *hwif;
- int h;
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
hw_regs_t hw;
struct ide_port_info d = sgiioc4_port_info;
- /*
- * Find an empty HWIF; if none available, return -ENOMEM.
- */
- for (h = 0; h < MAX_HWIFS; ++h) {
- hwif = &ide_hwifs[h];
- if (hwif->chipset == ide_unknown)
- break;
- }
- if (h == MAX_HWIFS) {
+ hwif = ide_find_port();
+ if (hwif == NULL) {
printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n",
DRV_NAME);
return -ENOMEM;
@@ -620,7 +605,6 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET;
ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET;
irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET;
- dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
@@ -628,7 +612,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
printk(KERN_ERR
"%s : %s -- ERROR, Addresses "
"0x%p to 0x%p ALREADY in use\n",
- __FUNCTION__, hwif->name, (void *) cmd_phys_base,
+ __func__, hwif->name, (void *) cmd_phys_base,
(void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
return -ENOMEM;
}
@@ -649,13 +633,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
/* Initializing chipset IRQ Registers */
writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
- if (dma_base == 0 || ide_dma_sgiioc4(hwif, dma_base)) {
- printk(KERN_INFO "%s: %s Bus-Master DMA disabled\n",
- hwif->name, DRV_NAME);
- d.mwdma_mask = 0;
- }
-
- ide_init_sgiioc4(hwif);
+ hwif->INB = &sgiioc4_INB;
idx[0] = hwif->index;
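
sgiioc4, like scc_pata above, switches from indexing hw->io_ports[] with IDE_*_OFFSET constants to named fields such as io_ports.ctl_addr, while setup loops keep filling io_ports_array[] by index. That reads as if the two views alias the same storage through a union; a compilable model of that assumption, trimmed to four registers (the kernel keeps more):

#include <assert.h>
#include <stdio.h>

/* Assumed layout: the indexed array and the named register fields
 * are two views of the same storage. */
union io_ports {
	struct {
		unsigned long data_addr;	/* array[0] */
		unsigned long error_addr;	/* array[1] */
		unsigned long nsect_addr;	/* array[2] */
		unsigned long ctl_addr;		/* array[3] in this model */
	} named;
	unsigned long array[4];
};

int main(void)
{
	union io_ports io = { .array = { 0 } };
	int i;

	for (i = 0; i < 4; i++)			/* indexed setup... */
		io.array[i] = 0x1f0 + i;

	assert(io.named.ctl_addr == io.array[3]);	/* ...named access */
	printf("ctl_addr = 0x%lx\n", io.named.ctl_addr);
	return 0;
}
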
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index b6be1b45f32..c2040a017f4 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -301,7 +301,7 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
}
/* returns 1 if dma irq issued, 0 otherwise */
-static int siimage_io_ide_dma_test_irq (ide_drive_t *drive)
+static int siimage_io_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -320,14 +320,14 @@ static int siimage_io_ide_dma_test_irq (ide_drive_t *drive)
}
/**
- * siimage_mmio_ide_dma_test_irq - check we caused an IRQ
+ * siimage_mmio_dma_test_irq - check we caused an IRQ
* @drive: drive we are testing
*
* Check if we caused an IDE DMA interrupt. We may also have caused
* SATA status interrupts, if so we clean them up and continue.
*/
-
-static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
+
+static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long addr = siimage_selreg(hwif, 0x1);
@@ -347,7 +347,7 @@ static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
printk(KERN_WARNING "%s: sata_error = 0x%08x, "
"watchdog = %d, %s\n",
drive->name, sata_error, watchdog,
- __FUNCTION__);
+ __func__);
} else {
watchdog = (ext_stat & 0x8000) ? 1 : 0;
@@ -369,6 +369,14 @@ static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
return 0;
}
+static int siimage_dma_test_irq(ide_drive_t *drive)
+{
+ if (drive->hwif->mmio)
+ return siimage_mmio_dma_test_irq(drive);
+ else
+ return siimage_io_dma_test_irq(drive);
+}
+
/**
* sil_sata_reset_poll - wait for SATA reset
* @drive: drive we are resetting
@@ -614,9 +622,10 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
struct pci_dev *dev = to_pci_dev(hwif->dev);
void *addr = pci_get_drvdata(dev);
u8 ch = hwif->channel;
- hw_regs_t hw;
unsigned long base;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+
/*
* Fill in the basic HWIF bits
*/
@@ -630,7 +639,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
* based I/O
*/
- memset(&hw, 0, sizeof(hw_regs_t));
+ memset(io_ports, 0, sizeof(*io_ports));
base = (unsigned long)addr;
if (ch)
@@ -643,17 +652,15 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
* so we can't currently use it sanely since we want to
* use LBA48 mode.
*/
- hw.io_ports[IDE_DATA_OFFSET] = base;
- hw.io_ports[IDE_ERROR_OFFSET] = base + 1;
- hw.io_ports[IDE_NSECTOR_OFFSET] = base + 2;
- hw.io_ports[IDE_SECTOR_OFFSET] = base + 3;
- hw.io_ports[IDE_LCYL_OFFSET] = base + 4;
- hw.io_ports[IDE_HCYL_OFFSET] = base + 5;
- hw.io_ports[IDE_SELECT_OFFSET] = base + 6;
- hw.io_ports[IDE_STATUS_OFFSET] = base + 7;
- hw.io_ports[IDE_CONTROL_OFFSET] = base + 10;
-
- hw.io_ports[IDE_IRQ_OFFSET] = 0;
+ io_ports->data_addr = base;
+ io_ports->error_addr = base + 1;
+ io_ports->nsect_addr = base + 2;
+ io_ports->lbal_addr = base + 3;
+ io_ports->lbam_addr = base + 4;
+ io_ports->lbah_addr = base + 5;
+ io_ports->device_addr = base + 6;
+ io_ports->status_addr = base + 7;
+ io_ports->ctl_addr = base + 10;
if (pdev_is_sata(dev)) {
base = (unsigned long)addr;
@@ -664,8 +671,6 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100;
}
- memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
-
hwif->irq = dev->irq;
hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00);
@@ -735,14 +740,14 @@ static void __devinit init_iops_siimage(ide_hwif_t *hwif)
}
/**
- * ata66_siimage - check for 80 pin cable
+ * sil_cable_detect - cable detection
* @hwif: interface to check
*
* Check for the presence of an ATA66 capable cable on the
* interface.
*/
-static u8 __devinit ata66_siimage(ide_hwif_t *hwif)
+static u8 __devinit sil_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long addr = siimage_selreg(hwif, 0);
@@ -756,68 +761,44 @@ static u8 __devinit ata66_siimage(ide_hwif_t *hwif)
return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
}
-/**
- * init_hwif_siimage - set up hwif structs
- * @hwif: interface to set up
- *
- * We do the basic set up of the interface structure. The SIIMAGE
- * requires several custom handlers so we override the default
- * ide DMA handlers appropriately
- */
-
-static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
-{
- u8 sata = is_sata(hwif);
-
- hwif->set_pio_mode = &sil_set_pio_mode;
- hwif->set_dma_mode = &sil_set_dma_mode;
- hwif->quirkproc = &sil_quirkproc;
-
- if (sata) {
- static int first = 1;
-
- hwif->reset_poll = &sil_sata_reset_poll;
- hwif->pre_reset = &sil_sata_pre_reset;
- hwif->udma_filter = &sil_sata_udma_filter;
-
- if (first) {
- printk(KERN_INFO "siimage: For full SATA support you should use the libata sata_sil module.\n");
- first = 0;
- }
- } else
- hwif->udma_filter = &sil_pata_udma_filter;
-
- hwif->cable_detect = ata66_siimage;
-
- if (hwif->dma_base == 0)
- return;
+static const struct ide_port_ops sil_pata_port_ops = {
+ .set_pio_mode = sil_set_pio_mode,
+ .set_dma_mode = sil_set_dma_mode,
+ .quirkproc = sil_quirkproc,
+ .udma_filter = sil_pata_udma_filter,
+ .cable_detect = sil_cable_detect,
+};
- if (sata)
- hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
+static const struct ide_port_ops sil_sata_port_ops = {
+ .set_pio_mode = sil_set_pio_mode,
+ .set_dma_mode = sil_set_dma_mode,
+ .reset_poll = sil_sata_reset_poll,
+ .pre_reset = sil_sata_pre_reset,
+ .quirkproc = sil_quirkproc,
+ .udma_filter = sil_sata_udma_filter,
+ .cable_detect = sil_cable_detect,
+};
- if (hwif->mmio) {
- hwif->ide_dma_test_irq = &siimage_mmio_ide_dma_test_irq;
- } else {
- hwif->ide_dma_test_irq = & siimage_io_ide_dma_test_irq;
- }
-}
+static struct ide_dma_ops sil_dma_ops = {
+ .dma_test_irq = siimage_dma_test_irq,
+};
-#define DECLARE_SII_DEV(name_str) \
+#define DECLARE_SII_DEV(name_str, p_ops) \
{ \
.name = name_str, \
.init_chipset = init_chipset_siimage, \
.init_iops = init_iops_siimage, \
- .init_hwif = init_hwif_siimage, \
- .host_flags = IDE_HFLAG_BOOTABLE, \
+ .port_ops = p_ops, \
+ .dma_ops = &sil_dma_ops, \
.pio_mask = ATA_PIO4, \
.mwdma_mask = ATA_MWDMA2, \
.udma_mask = ATA_UDMA6, \
}
static const struct ide_port_info siimage_chipsets[] __devinitdata = {
- /* 0 */ DECLARE_SII_DEV("SiI680"),
- /* 1 */ DECLARE_SII_DEV("SiI3112 Serial ATA"),
- /* 2 */ DECLARE_SII_DEV("Adaptec AAR-1210SA")
+ /* 0 */ DECLARE_SII_DEV("SiI680", &sil_pata_port_ops),
+ /* 1 */ DECLARE_SII_DEV("SiI3112 Serial ATA", &sil_sata_port_ops),
+ /* 2 */ DECLARE_SII_DEV("Adaptec AAR-1210SA", &sil_sata_port_ops)
};
/**
@@ -831,7 +812,24 @@ static const struct ide_port_info siimage_chipsets[] __devinitdata = {
static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
- return ide_setup_pci_device(dev, &siimage_chipsets[id->driver_data]);
+ struct ide_port_info d;
+ u8 idx = id->driver_data;
+
+ d = siimage_chipsets[idx];
+
+ if (idx) {
+ static int first = 1;
+
+ if (first) {
+ printk(KERN_INFO "siimage: For full SATA support you "
+ "should use the libata sata_sil module.\n");
+ first = 0;
+ }
+
+ d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
+ }
+
+ return ide_setup_pci_device(dev, &d);
}
static const struct pci_device_id siimage_pci_tbl[] = {
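
siimage_init_one now takes a stack copy of the const descriptor and adjusts it per device: the SATA variants gain IDE_HFLAG_NO_ATAPI_DMA, and the one-time libata hint moves here from init_hwif. That copy-the-template pattern recurs in sis5513 and sl82c105 below; a minimal standalone rendering:

#include <stdio.h>

struct port_info {
	const char *name;
	unsigned long host_flags;
};

enum { HFLAG_NO_ATAPI_DMA = 1 << 0 };

/* Shared, immutable templates... */
static const struct port_info chipsets[] = {
	{ "PATA variant", 0 },
	{ "SATA variant", 0 },
};

static int setup_device(const struct port_info *d)
{
	printf("%s: flags=0x%lx\n", d->name, d->host_flags);
	return 0;
}

/* ...and a probe that customizes a private stack copy. */
static int init_one(unsigned int idx)
{
	struct port_info d = chipsets[idx];

	if (idx)	/* non-zero index: the SATA entries */
		d.host_flags |= HFLAG_NO_ATAPI_DMA;

	return setup_device(&d);
}

int main(void)
{
	init_one(0);
	init_one(1);
	return 0;
}
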
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 512bb4c1fd5..4b0b85d8faf 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -59,10 +59,10 @@
#define ATA_16 0x01
#define ATA_33 0x02
#define ATA_66 0x03
-#define ATA_100a 0x04 // SiS730/SiS550 is ATA100 with ATA66 layout
+#define ATA_100a 0x04 /* SiS730/SiS550 is ATA100 with ATA66 layout */
#define ATA_100 0x05
-#define ATA_133a 0x06 // SiS961b with 133 support
-#define ATA_133 0x07 // SiS962/963
+#define ATA_133a 0x06 /* SiS961b with 133 support */
+#define ATA_133 0x07 /* SiS962/963 */
static u8 chipset_family;
@@ -111,69 +111,70 @@ static const struct {
Indexed by chipset_family and (dma_mode - XFER_UDMA_0) */
/* {0, ATA_16, ATA_33, ATA_66, ATA_100a, ATA_100, ATA_133} */
-static u8 cycle_time_offset[] = {0,0,5,4,4,0,0};
-static u8 cycle_time_range[] = {0,0,2,3,3,4,4};
+static u8 cycle_time_offset[] = { 0, 0, 5, 4, 4, 0, 0 };
+static u8 cycle_time_range[] = { 0, 0, 2, 3, 3, 4, 4 };
static u8 cycle_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = {
- {0,0,0,0,0,0,0}, /* no udma */
- {0,0,0,0,0,0,0}, /* no udma */
- {3,2,1,0,0,0,0}, /* ATA_33 */
- {7,5,3,2,1,0,0}, /* ATA_66 */
- {7,5,3,2,1,0,0}, /* ATA_100a (730 specific), differences are on cycle_time range and offset */
- {11,7,5,4,2,1,0}, /* ATA_100 */
- {15,10,7,5,3,2,1}, /* ATA_133a (earliest 691 southbridges) */
- {15,10,7,5,3,2,1}, /* ATA_133 */
+ { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
+ { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
+ { 3, 2, 1, 0, 0, 0, 0 }, /* ATA_33 */
+ { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_66 */
+ { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_100a (730 specific),
+ different cycle_time range and offset */
+ { 11, 7, 5, 4, 2, 1, 0 }, /* ATA_100 */
+ { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133a (earliest 691 southbridges) */
+ { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133 */
};
/* CRC Valid Setup Time vary across IDE clock setting 33/66/100/133
See SiS962 data sheet for more detail */
static u8 cvs_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = {
- {0,0,0,0,0,0,0}, /* no udma */
- {0,0,0,0,0,0,0}, /* no udma */
- {2,1,1,0,0,0,0},
- {4,3,2,1,0,0,0},
- {4,3,2,1,0,0,0},
- {6,4,3,1,1,1,0},
- {9,6,4,2,2,2,2},
- {9,6,4,2,2,2,2},
+ { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
+ { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */
+ { 2, 1, 1, 0, 0, 0, 0 },
+ { 4, 3, 2, 1, 0, 0, 0 },
+ { 4, 3, 2, 1, 0, 0, 0 },
+ { 6, 4, 3, 1, 1, 1, 0 },
+ { 9, 6, 4, 2, 2, 2, 2 },
+ { 9, 6, 4, 2, 2, 2, 2 },
};
/* Initialize time, Active time, Recovery time vary across
IDE clock settings. These 3 arrays hold the register value
for PIO0/1/2/3/4 and DMA0/1/2 mode in order */
static u8 ini_time_value[][8] = {
- {0,0,0,0,0,0,0,0},
- {0,0,0,0,0,0,0,0},
- {2,1,0,0,0,1,0,0},
- {4,3,1,1,1,3,1,1},
- {4,3,1,1,1,3,1,1},
- {6,4,2,2,2,4,2,2},
- {9,6,3,3,3,6,3,3},
- {9,6,3,3,3,6,3,3},
+ { 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 2, 1, 0, 0, 0, 1, 0, 0 },
+ { 4, 3, 1, 1, 1, 3, 1, 1 },
+ { 4, 3, 1, 1, 1, 3, 1, 1 },
+ { 6, 4, 2, 2, 2, 4, 2, 2 },
+ { 9, 6, 3, 3, 3, 6, 3, 3 },
+ { 9, 6, 3, 3, 3, 6, 3, 3 },
};
static u8 act_time_value[][8] = {
- {0,0,0,0,0,0,0,0},
- {0,0,0,0,0,0,0,0},
- {9,9,9,2,2,7,2,2},
- {19,19,19,5,4,14,5,4},
- {19,19,19,5,4,14,5,4},
- {28,28,28,7,6,21,7,6},
- {38,38,38,10,9,28,10,9},
- {38,38,38,10,9,28,10,9},
+ { 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 9, 9, 9, 2, 2, 7, 2, 2 },
+ { 19, 19, 19, 5, 4, 14, 5, 4 },
+ { 19, 19, 19, 5, 4, 14, 5, 4 },
+ { 28, 28, 28, 7, 6, 21, 7, 6 },
+ { 38, 38, 38, 10, 9, 28, 10, 9 },
+ { 38, 38, 38, 10, 9, 28, 10, 9 },
};
static u8 rco_time_value[][8] = {
- {0,0,0,0,0,0,0,0},
- {0,0,0,0,0,0,0,0},
- {9,2,0,2,0,7,1,1},
- {19,5,1,5,2,16,3,2},
- {19,5,1,5,2,16,3,2},
- {30,9,3,9,4,25,6,4},
- {40,12,4,12,5,34,12,5},
- {40,12,4,12,5,34,12,5},
+ { 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 9, 2, 0, 2, 0, 7, 1, 1 },
+ { 19, 5, 1, 5, 2, 16, 3, 2 },
+ { 19, 5, 1, 5, 2, 16, 3, 2 },
+ { 30, 9, 3, 9, 4, 25, 6, 4 },
+ { 40, 12, 4, 12, 5, 34, 12, 5 },
+ { 40, 12, 4, 12, 5, 34, 12, 5 },
};
/*
* Printing configuration
*/
/* Used for chipset type printing at boot time */
-static char* chipset_capability[] = {
+static char *chipset_capability[] = {
"ATA", "ATA 16",
"ATA 33", "ATA 66",
"ATA 100 (1st gen)", "ATA 100 (2nd gen)",
@@ -272,7 +273,7 @@ static void sis_program_timings(ide_drive_t *drive, const u8 mode)
sis_ata133_program_timings(drive, mode);
}
-static void config_drive_art_rwp (ide_drive_t *drive)
+static void config_drive_art_rwp(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
@@ -346,7 +347,7 @@ static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
sis_program_timings(drive, speed);
}
-static u8 sis5513_ata133_udma_filter(ide_drive_t *drive)
+static u8 sis_ata133_udma_filter(ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
u32 regdw = 0;
@@ -358,8 +359,7 @@ static u8 sis5513_ata133_udma_filter(ide_drive_t *drive)
return (regdw & 0x08) ? ATA_UDMA6 : ATA_UDMA5;
}
-/* Chip detection and general config */
-static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const char *name)
+static int __devinit sis_find_family(struct pci_dev *dev)
{
struct pci_dev *host;
int i = 0;
@@ -381,7 +381,7 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
chipset_family = ATA_100a;
}
pci_dev_put(host);
-
+
printk(KERN_INFO "SIS5513: %s %s controller\n",
SiSHostChipInfo[i].name, chipset_capability[chipset_family]);
}
@@ -440,63 +440,60 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
}
}
- if (!chipset_family)
- return -1;
+ return chipset_family;
+}
+static unsigned int __devinit init_chipset_sis5513(struct pci_dev *dev,
+ const char *name)
+{
/* Make general config ops here
1/ tell IDE channels to operate in Compatibility mode only
2/ tell old chips to allow per drive IDE timings */
- {
- u8 reg;
- u16 regw;
-
- switch(chipset_family) {
- case ATA_133:
- /* SiS962 operation mode */
- pci_read_config_word(dev, 0x50, &regw);
- if (regw & 0x08)
- pci_write_config_word(dev, 0x50, regw&0xfff7);
- pci_read_config_word(dev, 0x52, &regw);
- if (regw & 0x08)
- pci_write_config_word(dev, 0x52, regw&0xfff7);
- break;
- case ATA_133a:
- case ATA_100:
- /* Fixup latency */
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
- /* Set compatibility bit */
- pci_read_config_byte(dev, 0x49, &reg);
- if (!(reg & 0x01)) {
- pci_write_config_byte(dev, 0x49, reg|0x01);
- }
- break;
- case ATA_100a:
- case ATA_66:
- /* Fixup latency */
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x10);
-
- /* On ATA_66 chips the bit was elsewhere */
- pci_read_config_byte(dev, 0x52, &reg);
- if (!(reg & 0x04)) {
- pci_write_config_byte(dev, 0x52, reg|0x04);
- }
- break;
- case ATA_33:
- /* On ATA_33 we didn't have a single bit to set */
- pci_read_config_byte(dev, 0x09, &reg);
- if ((reg & 0x0f) != 0x00) {
- pci_write_config_byte(dev, 0x09, reg&0xf0);
- }
- case ATA_16:
- /* force per drive recovery and active timings
- needed on ATA_33 and below chips */
- pci_read_config_byte(dev, 0x52, &reg);
- if (!(reg & 0x08)) {
- pci_write_config_byte(dev, 0x52, reg|0x08);
- }
- break;
- }
+ u8 reg;
+ u16 regw;
+
+ switch (chipset_family) {
+ case ATA_133:
+ /* SiS962 operation mode */
+ pci_read_config_word(dev, 0x50, &regw);
+ if (regw & 0x08)
+ pci_write_config_word(dev, 0x50, regw&0xfff7);
+ pci_read_config_word(dev, 0x52, &regw);
+ if (regw & 0x08)
+ pci_write_config_word(dev, 0x52, regw&0xfff7);
+ break;
+ case ATA_133a:
+ case ATA_100:
+ /* Fixup latency */
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
+ /* Set compatibility bit */
+ pci_read_config_byte(dev, 0x49, &reg);
+ if (!(reg & 0x01))
+ pci_write_config_byte(dev, 0x49, reg|0x01);
+ break;
+ case ATA_100a:
+ case ATA_66:
+ /* Fixup latency */
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x10);
+
+ /* On ATA_66 chips the bit was elsewhere */
+ pci_read_config_byte(dev, 0x52, &reg);
+ if (!(reg & 0x04))
+ pci_write_config_byte(dev, 0x52, reg|0x04);
+ break;
+ case ATA_33:
+ /* On ATA_33 we didn't have a single bit to set */
+ pci_read_config_byte(dev, 0x09, &reg);
+ if ((reg & 0x0f) != 0x00)
+ pci_write_config_byte(dev, 0x09, reg&0xf0);
+ case ATA_16:
+ /* force per drive recovery and active timings
+ needed on ATA_33 and below chips */
+ pci_read_config_byte(dev, 0x52, &reg);
+ if (!(reg & 0x08))
+ pci_write_config_byte(dev, 0x52, reg|0x08);
+ break;
}
return 0;
@@ -517,7 +514,7 @@ static const struct sis_laptop sis_laptop[] = {
{ 0, }
};
-static u8 __devinit ata66_sis5513(ide_hwif_t *hwif)
+static u8 __devinit sis_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *pdev = to_pci_dev(hwif->dev);
const struct sis_laptop *lap = &sis_laptop[0];
@@ -546,38 +543,44 @@ static u8 __devinit ata66_sis5513(ide_hwif_t *hwif)
return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
}
-static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif)
-{
- u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
-
- hwif->set_pio_mode = &sis_set_pio_mode;
- hwif->set_dma_mode = &sis_set_dma_mode;
-
- if (chipset_family >= ATA_133)
- hwif->udma_filter = sis5513_ata133_udma_filter;
-
- hwif->cable_detect = ata66_sis5513;
-
- if (hwif->dma_base == 0)
- return;
+static const struct ide_port_ops sis_port_ops = {
+ .set_pio_mode = sis_set_pio_mode,
+ .set_dma_mode = sis_set_dma_mode,
+ .cable_detect = sis_cable_detect,
+};
- hwif->ultra_mask = udma_rates[chipset_family];
-}
+static const struct ide_port_ops sis_ata133_port_ops = {
+ .set_pio_mode = sis_set_pio_mode,
+ .set_dma_mode = sis_set_dma_mode,
+ .udma_filter = sis_ata133_udma_filter,
+ .cable_detect = sis_cable_detect,
+};
static const struct ide_port_info sis5513_chipset __devinitdata = {
.name = "SIS5513",
.init_chipset = init_chipset_sis5513,
- .init_hwif = init_hwif_sis5513,
- .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
- .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA |
- IDE_HFLAG_BOOTABLE,
+ .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
+ .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
};
static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
- return ide_setup_pci_device(dev, &sis5513_chipset);
+ struct ide_port_info d = sis5513_chipset;
+ u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
+
+ if (sis_find_family(dev) == 0)
+ return -ENOTSUPP;
+
+ if (chipset_family >= ATA_133)
+ d.port_ops = &sis_ata133_port_ops;
+ else
+ d.port_ops = &sis_port_ops;
+
+ d.udma_mask = udma_rates[chipset_family];
+
+ return ide_setup_pci_device(dev, &d);
}
static const struct pci_device_id sis5513_pci_tbl[] = {
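
For sis5513 the family probe moves out of init_chipset into sis_find_family(), and sis5513_init_one() then picks the port_ops variant and the UDMA mask by indexing udma_rates[] with chipset_family. A small sketch of that table-driven capability selection, with values shaped like (but not copied from) the driver's:

#include <stdio.h>

/* Detect once, then index capability tables by family; nothing is
 * patched into a global from init_hwif() any more. */
enum family { F_ATA16, F_ATA33, F_ATA66, F_ATA100, F_ATA133, NR_FAMILIES };

static const unsigned char udma_rates[NR_FAMILIES] = {
	0x00, 0x07, 0x1f, 0x3f, 0x7f,
};

int main(void)
{
	enum family f = F_ATA100;	/* stand-in for sis_find_family() */

	printf("family %d -> udma mask 0x%02x\n", f, (unsigned)udma_rates[f]);
	return 0;
}
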
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 1f00251a4a8..ce84fa045d3 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -179,7 +179,7 @@ static void sl82c105_dma_start(ide_drive_t *drive)
struct pci_dev *dev = to_pci_dev(hwif->dev);
int reg = 0x44 + drive->dn * 4;
- DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name));
+ DBG(("%s(drive:%s)\n", __func__, drive->name));
pci_write_config_word(dev, reg, drive->drive_data >> 16);
@@ -203,7 +203,7 @@ static int sl82c105_dma_end(ide_drive_t *drive)
int reg = 0x44 + drive->dn * 4;
int ret;
- DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name));
+ DBG(("%s(drive:%s)\n", __func__, drive->name));
ret = __ide_dma_end(drive);
@@ -232,7 +232,7 @@ static void sl82c105_resetproc(ide_drive_t *drive)
* Return the revision of the Winbond bridge
* which this function is part of.
*/
-static unsigned int sl82c105_bridge_revision(struct pci_dev *dev)
+static u8 sl82c105_bridge_revision(struct pci_dev *dev)
{
struct pci_dev *bridge;
@@ -282,64 +282,59 @@ static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const c
return dev->irq;
}
-/*
- * Initialise IDE channel
- */
-static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
-{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned int rev;
-
- DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index));
-
- hwif->set_pio_mode = &sl82c105_set_pio_mode;
- hwif->set_dma_mode = &sl82c105_set_dma_mode;
- hwif->resetproc = &sl82c105_resetproc;
-
- if (!hwif->dma_base)
- return;
-
- rev = sl82c105_bridge_revision(dev);
- if (rev <= 5) {
- /*
- * Never ever EVER under any circumstances enable
- * DMA when the bridge is this old.
- */
- printk(" %s: Winbond W83C553 bridge revision %d, "
- "BM-DMA disabled\n", hwif->name, rev);
- return;
- }
-
- hwif->mwdma_mask = ATA_MWDMA2;
-
- hwif->dma_lost_irq = &sl82c105_dma_lost_irq;
- hwif->dma_start = &sl82c105_dma_start;
- hwif->ide_dma_end = &sl82c105_dma_end;
- hwif->dma_timeout = &sl82c105_dma_timeout;
+static const struct ide_port_ops sl82c105_port_ops = {
+ .set_pio_mode = sl82c105_set_pio_mode,
+ .set_dma_mode = sl82c105_set_dma_mode,
+ .resetproc = sl82c105_resetproc,
+};
- if (hwif->mate)
- hwif->serialized = hwif->mate->serialized = 1;
-}
+static const struct ide_dma_ops sl82c105_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = sl82c105_dma_start,
+ .dma_end = sl82c105_dma_end,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = sl82c105_dma_lost_irq,
+ .dma_timeout = sl82c105_dma_timeout,
+};
static const struct ide_port_info sl82c105_chipset __devinitdata = {
.name = "W82C105",
.init_chipset = init_chipset_sl82c105,
- .init_hwif = init_hwif_sl82c105,
.enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
+ .port_ops = &sl82c105_port_ops,
+ .dma_ops = &sl82c105_dma_ops,
.host_flags = IDE_HFLAG_IO_32BIT |
IDE_HFLAG_UNMASK_IRQS |
/* FIXME: check for Compatibility mode in generic IDE PCI code */
#if defined(CONFIG_LOPEC) || defined(CONFIG_SANDPOINT)
IDE_HFLAG_FORCE_LEGACY_IRQS |
#endif
- IDE_HFLAG_NO_AUTODMA |
- IDE_HFLAG_BOOTABLE,
+ IDE_HFLAG_SERIALIZE_DMA |
+ IDE_HFLAG_NO_AUTODMA,
.pio_mask = ATA_PIO5,
+ .mwdma_mask = ATA_MWDMA2,
};
static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
- return ide_setup_pci_device(dev, &sl82c105_chipset);
+ struct ide_port_info d = sl82c105_chipset;
+ u8 rev = sl82c105_bridge_revision(dev);
+
+ if (rev <= 5) {
+ /*
+ * Never ever EVER under any circumstances enable
+ * DMA when the bridge is this old.
+ */
+ printk(KERN_INFO "W82C105_IDE: Winbond W83C553 bridge "
+ "revision %d, BM-DMA disabled\n", rev);
+ d.dma_ops = NULL;
+ d.mwdma_mask = 0;
+ d.host_flags &= ~IDE_HFLAG_SERIALIZE_DMA;
+ }
+
+ return ide_setup_pci_device(dev, &d);
}
static const struct pci_device_id sl82c105_pci_tbl[] = {
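
The rev <= 5 check previously buried in init_hwif_sl82c105() becomes a probe-time veto: on old W83C553 bridges the descriptor copy simply drops dma_ops and the MWDMA mask, so no DMA paths are ever wired up. A self-contained model of that veto (names invented):

#include <stdio.h>

struct dma_ops { int (*dma_start)(int port); };

static int bridge_dma_start(int port) { printf("DMA start, port %d\n", port); return 0; }

static const struct dma_ops bridge_dma = { .dma_start = bridge_dma_start };

struct port_info {
	const struct dma_ops *dma_ops;	/* NULL: no DMA paths wired up */
	unsigned char mwdma_mask;
};

/* Old bridge revisions lose DMA before the port is ever set up. */
static struct port_info probe(int bridge_rev)
{
	struct port_info d = { &bridge_dma, 0x07 };

	if (bridge_rev <= 5) {
		d.dma_ops = NULL;
		d.mwdma_mask = 0;
	}
	return d;
}

int main(void)
{
	struct port_info d = probe(4);

	printf("DMA %s\n", d.dma_ops ? "enabled" : "disabled");
	return 0;
}
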
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 65f4c2ffaa5..dae6e2c94d8 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -27,9 +27,9 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
unsigned long flags;
u16 master_data;
u8 slave_data;
- int control = 0;
+ int control = 0;
/* ISP RTC */
- static const u8 timings[][2]= {
+ static const u8 timings[][2] = {
{ 0, 0 },
{ 0, 0 },
{ 1, 0 },
@@ -125,19 +125,17 @@ static u8 __devinit slc90e66_cable_detect(ide_hwif_t *hwif)
return (reg47 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
-static void __devinit init_hwif_slc90e66(ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &slc90e66_set_pio_mode;
- hwif->set_dma_mode = &slc90e66_set_dma_mode;
-
- hwif->cable_detect = slc90e66_cable_detect;
-}
+static const struct ide_port_ops slc90e66_port_ops = {
+ .set_pio_mode = slc90e66_set_pio_mode,
+ .set_dma_mode = slc90e66_set_dma_mode,
+ .cable_detect = slc90e66_cable_detect,
+};
static const struct ide_port_info slc90e66_chipset __devinitdata = {
.name = "SLC90E66",
- .init_hwif = init_hwif_slc90e66,
- .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
- .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE,
+ .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
+ .port_ops = &slc90e66_port_ops,
+ .host_flags = IDE_HFLAG_LEGACY_IRQS,
.pio_mask = ATA_PIO4,
.swdma_mask = ATA_SWDMA2_ONLY,
.mwdma_mask = ATA_MWDMA12_ONLY,
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 1e4a6262bce..9b4b27a4c71 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -18,20 +18,20 @@ static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed)
u16 mode, scr = inw(scr_port);
switch (speed) {
- case XFER_UDMA_4: mode = 0x00c0; break;
- case XFER_UDMA_3: mode = 0x00b0; break;
- case XFER_UDMA_2: mode = 0x00a0; break;
- case XFER_UDMA_1: mode = 0x0090; break;
- case XFER_UDMA_0: mode = 0x0080; break;
- case XFER_MW_DMA_2: mode = 0x0070; break;
- case XFER_MW_DMA_1: mode = 0x0060; break;
- case XFER_MW_DMA_0: mode = 0x0050; break;
- case XFER_PIO_4: mode = 0x0400; break;
- case XFER_PIO_3: mode = 0x0300; break;
- case XFER_PIO_2: mode = 0x0200; break;
- case XFER_PIO_1: mode = 0x0100; break;
- case XFER_PIO_0:
- default: mode = 0x0000; break;
+ case XFER_UDMA_4: mode = 0x00c0; break;
+ case XFER_UDMA_3: mode = 0x00b0; break;
+ case XFER_UDMA_2: mode = 0x00a0; break;
+ case XFER_UDMA_1: mode = 0x0090; break;
+ case XFER_UDMA_0: mode = 0x0080; break;
+ case XFER_MW_DMA_2: mode = 0x0070; break;
+ case XFER_MW_DMA_1: mode = 0x0060; break;
+ case XFER_MW_DMA_0: mode = 0x0050; break;
+ case XFER_PIO_4: mode = 0x0400; break;
+ case XFER_PIO_3: mode = 0x0300; break;
+ case XFER_PIO_2: mode = 0x0200; break;
+ case XFER_PIO_1: mode = 0x0100; break;
+ case XFER_PIO_0:
+ default: mode = 0x0000; break;
}
scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f;
@@ -157,11 +157,6 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
/* Store the system control register base for convenience... */
hwif->config_data = sc_base;
- hwif->set_pio_mode = &tc86c001_set_pio_mode;
- hwif->set_dma_mode = &tc86c001_set_mode;
-
- hwif->cable_detect = tc86c001_cable_detect;
-
if (!hwif->dma_base)
return;
@@ -173,8 +168,6 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
/* Sector Count Register limit */
hwif->rqsize = 0xffff;
-
- hwif->dma_start = &tc86c001_dma_start;
}
static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
@@ -187,10 +180,29 @@ static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
return err;
}
+static const struct ide_port_ops tc86c001_port_ops = {
+ .set_pio_mode = tc86c001_set_pio_mode,
+ .set_dma_mode = tc86c001_set_mode,
+ .cable_detect = tc86c001_cable_detect,
+};
+
+static const struct ide_dma_ops tc86c001_dma_ops = {
+ .dma_host_set = ide_dma_host_set,
+ .dma_setup = ide_dma_setup,
+ .dma_exec_cmd = ide_dma_exec_cmd,
+ .dma_start = tc86c001_dma_start,
+ .dma_end = __ide_dma_end,
+ .dma_test_irq = ide_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
+
static const struct ide_port_info tc86c001_chipset __devinitdata = {
.name = "TC86C001",
.init_chipset = init_chipset_tc86c001,
.init_hwif = init_hwif_tc86c001,
+ .port_ops = &tc86c001_port_ops,
+ .dma_ops = &tc86c001_dma_ops,
.host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD |
IDE_HFLAG_ABUSE_SET_DMA_MODE,
.pio_mask = ATA_PIO4,
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
index a67d02a3f96..db65a558d4e 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/pci/triflex.c
@@ -87,17 +87,15 @@ static void triflex_set_pio_mode(ide_drive_t *drive, const u8 pio)
triflex_set_mode(drive, XFER_PIO_0 + pio);
}
-static void __devinit init_hwif_triflex(ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &triflex_set_pio_mode;
- hwif->set_dma_mode = &triflex_set_mode;
-}
+static const struct ide_port_ops triflex_port_ops = {
+ .set_pio_mode = triflex_set_pio_mode,
+ .set_dma_mode = triflex_set_mode,
+};
static const struct ide_port_info triflex_device __devinitdata = {
.name = "TRIFLEX",
- .init_hwif = init_hwif_triflex,
.enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
- .host_flags = IDE_HFLAG_BOOTABLE,
+ .port_ops = &triflex_port_ops,
.pio_mask = ATA_PIO4,
.swdma_mask = ATA_SWDMA2,
.mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
index de750f7a43e..a8a3138682e 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/pci/trm290.c
@@ -214,7 +214,7 @@ static void trm290_dma_start(ide_drive_t *drive)
{
}
-static int trm290_ide_dma_end (ide_drive_t *drive)
+static int trm290_dma_end(ide_drive_t *drive)
{
u16 status;
@@ -225,7 +225,7 @@ static int trm290_ide_dma_end (ide_drive_t *drive)
return status != 0x00ff;
}
-static int trm290_ide_dma_test_irq (ide_drive_t *drive)
+static int trm290_dma_test_irq(ide_drive_t *drive)
{
u16 status;
@@ -254,22 +254,11 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
hwif->config_data = cfg_base;
hwif->dma_base = (cfg_base + 4) ^ (hwif->channel ? 0x80 : 0);
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx",
+ printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
hwif->name, hwif->dma_base, hwif->dma_base + 3);
- if (!request_region(hwif->dma_base, 4, hwif->name)) {
- printk(KERN_CONT " -- Error, ports in use.\n");
+ if (ide_allocate_dma_engine(hwif))
return;
- }
-
- hwif->dmatable_cpu = pci_alloc_consistent(dev, PRD_ENTRIES * PRD_BYTES,
- &hwif->dmatable_dma);
- if (!hwif->dmatable_cpu) {
- printk(KERN_CONT " -- Error, unable to allocate DMA table.\n");
- release_region(hwif->dma_base, 4);
- return;
- }
- printk(KERN_CONT "\n");
local_irq_save(flags);
/* put config reg into first byte of hwif->select_data */
@@ -291,14 +280,6 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
/* sharing IRQ with mate */
hwif->irq = hwif->mate->irq;
- hwif->dma_host_set = &trm290_dma_host_set;
- hwif->dma_setup = &trm290_dma_setup;
- hwif->dma_exec_cmd = &trm290_dma_exec_cmd;
- hwif->dma_start = &trm290_dma_start;
- hwif->ide_dma_end = &trm290_ide_dma_end;
- hwif->ide_dma_test_irq = &trm290_ide_dma_test_irq;
-
- hwif->selectproc = &trm290_selectproc;
#if 1
{
/*
@@ -317,7 +298,7 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
if (old != compat && old_mask == 0xff) {
/* leave lower 10 bits untouched */
compat += (next_offset += 0x400);
- hwif->io_ports[IDE_CONTROL_OFFSET] = compat + 2;
+ hwif->io_ports.ctl_addr = compat + 2;
outw(compat | 1, hwif->config_data);
new = inw(hwif->config_data);
printk(KERN_INFO "%s: control basereg workaround: "
@@ -328,16 +309,32 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
#endif
}
+static const struct ide_port_ops trm290_port_ops = {
+ .selectproc = trm290_selectproc,
+};
+
+static struct ide_dma_ops trm290_dma_ops = {
+ .dma_host_set = trm290_dma_host_set,
+ .dma_setup = trm290_dma_setup,
+ .dma_exec_cmd = trm290_dma_exec_cmd,
+ .dma_start = trm290_dma_start,
+ .dma_end = trm290_dma_end,
+ .dma_test_irq = trm290_dma_test_irq,
+ .dma_lost_irq = ide_dma_lost_irq,
+ .dma_timeout = ide_dma_timeout,
+};
+
static const struct ide_port_info trm290_chipset __devinitdata = {
.name = "TRM290",
.init_hwif = init_hwif_trm290,
.chipset = ide_trm290,
+ .port_ops = &trm290_port_ops,
+ .dma_ops = &trm290_dma_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
#if 0 /* play it safe for now */
IDE_HFLAG_TRUST_BIOS_FOR_DMA |
#endif
IDE_HFLAG_NO_AUTODMA |
- IDE_HFLAG_BOOTABLE |
IDE_HFLAG_NO_LBA48,
};
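
In init_hwif_trm290 the open-coded request_region() plus pci_alloc_consistent() sequence is replaced by one ide_allocate_dma_engine(hwif) call. Assuming that helper consolidates the removed claim-then-allocate steps — which the hunk suggests but this diff does not show — the error unwinding it buys looks roughly like this sketch (all names invented):

#include <stdio.h>
#include <stdlib.h>

/* Claim the range, then allocate the descriptor table; undo the
 * claim if allocation fails, so callers get one yes/no answer. */
static int claim_range(unsigned long base)    { (void)base; return 0; }
static void release_range(unsigned long base) { (void)base; }

static int allocate_dma_engine(unsigned long base, void **table)
{
	if (claim_range(base))
		return -1;

	*table = malloc(256 * 8);	/* stand-in for the PRD table */
	if (*table == NULL) {
		release_range(base);	/* unwind the earlier claim */
		return -1;
	}
	return 0;
}

int main(void)
{
	void *table;

	if (allocate_dma_engine(0x1f0, &table) == 0) {
		puts("DMA engine ready");
		free(table);
	}
	return 0;
}
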
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 9004e752188..566e0ecb8db 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -340,7 +340,7 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
* Determine system bus clock.
*/
- via_clock = system_bus_clock() * 1000;
+ via_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000;
switch (via_clock) {
case 33000: via_clock = 33333; break;
@@ -415,25 +415,21 @@ static u8 __devinit via82cxxx_cable_detect(ide_hwif_t *hwif)
return ATA_CBL_PATA40;
}
-static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
-{
- hwif->set_pio_mode = &via_set_pio_mode;
- hwif->set_dma_mode = &via_set_drive;
-
- hwif->cable_detect = via82cxxx_cable_detect;
-}
+static const struct ide_port_ops via_port_ops = {
+ .set_pio_mode = via_set_pio_mode,
+ .set_dma_mode = via_set_drive,
+ .cable_detect = via82cxxx_cable_detect,
+};
static const struct ide_port_info via82cxxx_chipset __devinitdata = {
.name = "VP_IDE",
.init_chipset = init_chipset_via82cxxx,
- .init_hwif = init_hwif_via82cxxx,
.enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
+ .port_ops = &via_port_ops,
.host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
- IDE_HFLAG_PIO_NO_DOWNGRADE |
IDE_HFLAG_ABUSE_SET_DMA_MODE |
IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_IO_32BIT |
- IDE_HFLAG_BOOTABLE,
+ IDE_HFLAG_IO_32BIT,
.pio_mask = ATA_PIO5,
.swdma_mask = ATA_SWDMA2,
.mwdma_mask = ATA_MWDMA2,
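
The via82cxxx clock line now honors an ide_pci_clk override before falling back to system_bus_clock(). The fallback idiom in isolation, with a stub standing in for the probed clock:

#include <stdio.h>

static unsigned int ide_pci_clk;	/* 0 = no override given */

static unsigned int probed_bus_clock(void) { return 33; }	/* MHz */

/* The override wins; the probed value is only the fallback. */
static unsigned int via_clock_khz(void)
{
	unsigned int mhz = ide_pci_clk ? ide_pci_clk : probed_bus_clock();

	return mhz * 1000;
}

int main(void)
{
	printf("probed : %u kHz\n", via_clock_khz());
	ide_pci_clk = 66;
	printf("forced : %u kHz\n", via_clock_khz());
	return 0;
}
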
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c
index a784a97ca7e..f0e638dcc3a 100644
--- a/drivers/ide/ppc/mpc8xx.c
+++ b/drivers/ide/ppc/mpc8xx.c
@@ -36,6 +36,8 @@
#include <asm/machdep.h>
#include <asm/irq.h>
+#define DRV_NAME "ide-mpc8xx"
+
static int identify (volatile u8 *p);
static void print_fixed (volatile u8 *p);
static void print_funcid (int func);
@@ -127,9 +129,9 @@ static int pcmcia_schlvl = PCMCIA_SCHLVL;
* MPC8xx's internal PCMCIA interface
*/
#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT)
-static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
+static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
{
- unsigned long *p = hw->io_ports;
+ unsigned long *p = hw->io_ports_array;
int i;
typedef struct {
@@ -182,6 +184,13 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
pcmcia_phy_base, pcmcia_phy_end,
pcmcia_phy_end - pcmcia_phy_base);
+ if (!request_mem_region(pcmcia_phy_base,
+ pcmcia_phy_end - pcmcia_phy_base,
+ DRV_NAME)) {
+ printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
+ return -EBUSY;
+ }
+
pcmcia_base=(unsigned long)ioremap(pcmcia_phy_base,
pcmcia_phy_end-pcmcia_phy_base);
@@ -236,7 +245,7 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
if (pcmp->pcmc_pipr & (M8XX_PCMCIA_CD1(_slot_)|M8XX_PCMCIA_CD2(_slot_))) {
printk ("No card in slot %c: PIPR=%08x\n",
'A' + _slot_, (u32) pcmp->pcmc_pipr);
- return; /* No card in slot */
+ return -ENODEV; /* No card in slot */
}
check_ide_device (pcmcia_base);
@@ -279,9 +288,6 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
}
#endif /* CONFIG_IDE_8xx_PCCARD */
- ide_hwifs[data_port].pio_mask = ATA_PIO4;
- ide_hwifs[data_port].set_pio_mode = m8xx_ide_set_pio_mode;
-
/* Enable Harddisk Interrupt,
* and make it edge sensitive
*/
@@ -296,6 +302,8 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
/* Enable falling edge irq */
pcmp->pcmc_per = 0x100000 >> (16 * _slot_);
#endif /* CONFIG_IDE_8xx_PCCARD */
+
+ return 0;
}
#endif /* CONFIG_IDE_8xx_PCCARD || CONFIG_IDE_8xx_DIRECT */
@@ -304,9 +312,9 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
* MPC8xx's internal PCMCIA interface
*/
#if defined(CONFIG_IDE_EXT_DIRECT)
-static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
+static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
{
- unsigned long *p = hw->io_ports;
+ unsigned long *p = hw->io_ports_array;
int i;
u32 ide_phy_base;
@@ -327,7 +335,12 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
printk ("IDE phys mem : %08x...%08x (size %08x)\n",
ide_phy_base, ide_phy_end,
ide_phy_end - ide_phy_base);
-
+
+ if (!request_mem_region(ide_phy_base, 0x200, DRV_NAME)) {
+ printk(KERN_ERR "%s: resources busy\n", DRV_NAME);
+ return -EBUSY;
+ }
+
ide_base=(unsigned long)ioremap(ide_phy_base,
ide_phy_end-ide_phy_base);
@@ -357,15 +370,14 @@ static void __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
hw->irq = ioport_dsc[data_port].irq;
hw->ack_intr = (ide_ack_intr_t *)ide_interrupt_ack;
- ide_hwifs[data_port].pio_mask = ATA_PIO4;
- ide_hwifs[data_port].set_pio_mode = m8xx_ide_set_pio_mode;
-
/* Enable Harddisk Interrupt,
* and make it edge sensitive
*/
/* (11-18) Set edge detect for irq, no wakeup from low power mode */
((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |=
(0x80000000 >> ioport_dsc[data_port].irq);
+
+ return 0;
}
#endif /* CONFIG_IDE_EXT_DIRECT */
@@ -426,10 +438,14 @@ static void m8xx_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
#elif defined(CONFIG_IDE_EXT_DIRECT)
printk("%s[%d] %s: not implemented yet!\n",
- __FILE__,__LINE__,__FUNCTION__);
+ __FILE__, __LINE__, __func__);
#endif /* defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_PCMCIA) */
}
+static const struct ide_port_ops m8xx_port_ops = {
+ .set_pio_mode = m8xx_ide_set_pio_mode,
+};
+
static void
ide_interrupt_ack (void *dev)
{
@@ -794,14 +810,30 @@ static int __init mpc8xx_ide_probe(void)
#ifdef IDE0_BASE_OFFSET
memset(&hw, 0, sizeof(hw));
- m8xx_ide_init_ports(&hw, 0);
- ide_init_port_hw(&ide_hwifs[0], &hw);
- idx[0] = 0;
+ if (!m8xx_ide_init_ports(&hw, 0)) {
+ ide_hwif_t *hwif = ide_find_port();
+
+ if (hwif) {
+ ide_init_port_hw(hwif, &hw);
+ hwif->pio_mask = ATA_PIO4;
+ hwif->port_ops = &m8xx_port_ops;
+
+ idx[0] = hwif->index;
+ }
+ }
#ifdef IDE1_BASE_OFFSET
memset(&hw, 0, sizeof(hw));
- m8xx_ide_init_ports(&hw, 1);
- ide_init_port_hw(&ide_hwifs[1], &hw);
- idx[1] = 1;
+ if (!m8xx_ide_init_ports(&hw, 1)) {
+ ide_hwif_t *mate = ide_find_port();
+
+ if (mate) {
+ ide_init_port_hw(mate, &hw);
+ mate->pio_mask = ATA_PIO4;
+ mate->port_ops = &m8xx_port_ops;
+
+ idx[1] = mate->index;
+ }
+ }
#endif
#endif
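The mpc8xx hunks above convert m8xx_ide_init_ports() to return an error code and to reserve the register window with request_mem_region() before remapping it. A minimal sketch of that reserve-then-map pattern, assuming hypothetical names (MY_DRV_NAME, my_drv_setup):

#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/kernel.h>

#define MY_DRV_NAME "my-drv"		/* hypothetical driver name */

static void __iomem *my_regs;

static int my_drv_setup(unsigned long phys_base, unsigned long size)
{
	/* claim the physical window first so a conflicting driver fails
	 * loudly here instead of silently sharing the registers */
	if (!request_mem_region(phys_base, size, MY_DRV_NAME)) {
		printk(KERN_ERR "%s: resources busy\n", MY_DRV_NAME);
		return -EBUSY;
	}

	my_regs = ioremap(phys_base, size);
	if (my_regs == NULL) {
		release_mem_region(phys_base, size);
		return -ENOMEM;
	}

	return 0;
}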
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 88619b50d9e..3cac6b2790d 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -79,8 +79,6 @@ typedef struct pmac_ide_hwif {
} pmac_ide_hwif_t;
-static pmac_ide_hwif_t pmac_ide[MAX_HWIFS];
-
enum {
controller_ohare, /* OHare based */
controller_heathrow, /* Heathrow/Paddington */
@@ -411,7 +409,7 @@ kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
*/
#define IDE_WAKEUP_DELAY (1*HZ)
-static int pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif);
+static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
static void pmac_ide_selectproc(ide_drive_t *drive);
static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
@@ -419,7 +417,7 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
#define PMAC_IDE_REG(x) \
- ((void __iomem *)((drive)->hwif->io_ports[IDE_DATA_OFFSET] + (x)))
+ ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
/*
* Apply the timings of the proper unit (master/slave) to the shared
@@ -920,12 +918,29 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
return 0;
}
+static const struct ide_port_ops pmac_ide_ata6_port_ops = {
+ .set_pio_mode = pmac_ide_set_pio_mode,
+ .set_dma_mode = pmac_ide_set_dma_mode,
+ .selectproc = pmac_ide_kauai_selectproc,
+};
+
+static const struct ide_port_ops pmac_ide_port_ops = {
+ .set_pio_mode = pmac_ide_set_pio_mode,
+ .set_dma_mode = pmac_ide_set_dma_mode,
+ .selectproc = pmac_ide_selectproc,
+};
+
+static const struct ide_dma_ops pmac_dma_ops;
+
static const struct ide_port_info pmac_port_info = {
+ .init_dma = pmac_ide_init_dma,
.chipset = ide_pmac,
+#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
+ .dma_ops = &pmac_dma_ops,
+#endif
+ .port_ops = &pmac_ide_port_ops,
.host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
- IDE_HFLAG_PIO_NO_DOWNGRADE |
IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
IDE_HFLAG_UNMASK_IRQS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -950,12 +965,15 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
pmif->broken_dma = pmif->broken_dma_warn = 0;
if (of_device_is_compatible(np, "shasta-ata")) {
pmif->kind = controller_sh_ata6;
+ d.port_ops = &pmac_ide_ata6_port_ops;
d.udma_mask = ATA_UDMA6;
} else if (of_device_is_compatible(np, "kauai-ata")) {
pmif->kind = controller_un_ata6;
+ d.port_ops = &pmac_ide_ata6_port_ops;
d.udma_mask = ATA_UDMA5;
} else if (of_device_is_compatible(np, "K2-UATA")) {
pmif->kind = controller_k2_ata6;
+ d.port_ops = &pmac_ide_ata6_port_ops;
d.udma_mask = ATA_UDMA5;
} else if (of_device_is_compatible(np, "keylargo-ata")) {
if (strcmp(np->name, "ata-4") == 0) {
@@ -1032,37 +1050,29 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
default_hwif_mmiops(hwif);
hwif->OUTBSYNC = pmac_outbsync;
- /* Tell common code _not_ to mess with resources */
- hwif->mmio = 1;
hwif->hwif_data = pmif;
ide_init_port_hw(hwif, hw);
- hwif->noprobe = pmif->mediabay;
hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
- hwif->set_pio_mode = pmac_ide_set_pio_mode;
- if (pmif->kind == controller_un_ata6
- || pmif->kind == controller_k2_ata6
- || pmif->kind == controller_sh_ata6)
- hwif->selectproc = pmac_ide_kauai_selectproc;
- else
- hwif->selectproc = pmac_ide_selectproc;
- hwif->set_dma_mode = pmac_ide_set_dma_mode;
printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n",
hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
pmif->mediabay ? " (mediabay)" : "", hwif->irq);
-
+
+ if (pmif->mediabay) {
#ifdef CONFIG_PMAC_MEDIABAY
- if (pmif->mediabay && check_media_bay_by_base(pmif->regbase, MB_CD) == 0)
- hwif->noprobe = 0;
-#endif /* CONFIG_PMAC_MEDIABAY */
+ if (check_media_bay_by_base(pmif->regbase, MB_CD)) {
+#else
+ if (1) {
+#endif
+ hwif->drives[0].noprobe = 1;
+ hwif->drives[1].noprobe = 1;
+ }
+ }
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
if (pmif->cable_80 == 0)
d.udma_mask &= ATA_UDMA2;
- /* has a DBDMA controller channel */
- if (pmif->dma_regs == 0 || pmac_ide_setup_dma(pmif, hwif) < 0)
#endif
- d.udma_mask = d.mwdma_mask = 0;
idx[0] = hwif->index;
@@ -1076,8 +1086,9 @@ static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base)
int i;
for (i = 0; i < 8; ++i)
- hw->io_ports[i] = base + i * 0x10;
- hw->io_ports[8] = base + 0x160;
+ hw->io_ports_array[i] = base + i * 0x10;
+
+ hw->io_ports.ctl_addr = base + 0x160;
}
/*
@@ -1088,35 +1099,36 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
void __iomem *base;
unsigned long regbase;
- int irq;
ide_hwif_t *hwif;
pmac_ide_hwif_t *pmif;
- int i, rc;
+ int irq, rc;
hw_regs_t hw;
- i = 0;
- while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0
- || pmac_ide[i].node != NULL))
- ++i;
- if (i >= MAX_HWIFS) {
+ pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
+ if (pmif == NULL)
+ return -ENOMEM;
+
+ hwif = ide_find_port();
+ if (hwif == NULL) {
printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n");
printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name);
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_free_pmif;
}
- pmif = &pmac_ide[i];
- hwif = &ide_hwifs[i];
-
if (macio_resource_count(mdev) == 0) {
- printk(KERN_WARNING "ide%d: no address for %s\n",
- i, mdev->ofdev.node->full_name);
- return -ENXIO;
+ printk(KERN_WARNING "ide-pmac: no address for %s\n",
+ mdev->ofdev.node->full_name);
+ rc = -ENXIO;
+ goto out_free_pmif;
}
/* Request memory resource for IO ports */
if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
- printk(KERN_ERR "ide%d: can't request mmio resource !\n", i);
- return -EBUSY;
+ printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
+ "%s!\n", mdev->ofdev.node->full_name);
+ rc = -EBUSY;
+ goto out_free_pmif;
}
/* XXX This is bogus. Should be fixed in the registry by checking
@@ -1125,8 +1137,8 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
* where that happens though...
*/
if (macio_irq_count(mdev) == 0) {
- printk(KERN_WARNING "ide%d: no intrs for device %s, using 13\n",
- i, mdev->ofdev.node->full_name);
+ printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
+ "13\n", mdev->ofdev.node->full_name);
irq = irq_create_mapping(NULL, 13);
} else
irq = macio_irq(mdev, 0);
@@ -1144,7 +1156,9 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
if (macio_resource_count(mdev) >= 2) {
if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
- printk(KERN_WARNING "ide%d: can't request DMA resource !\n", i);
+ printk(KERN_WARNING "ide-pmac: can't request DMA "
+ "resource for %s!\n",
+ mdev->ofdev.node->full_name);
else
pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
} else
@@ -1166,11 +1180,15 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
iounmap(pmif->dma_regs);
macio_release_resource(mdev, 1);
}
- memset(pmif, 0, sizeof(*pmif));
macio_release_resource(mdev, 0);
+ kfree(pmif);
}
return rc;
+
+out_free_pmif:
+ kfree(pmif);
+ return rc;
}
static int
@@ -1215,7 +1233,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
pmac_ide_hwif_t *pmif;
void __iomem *base;
unsigned long rbase, rlen;
- int i, rc;
+ int rc;
hw_regs_t hw;
np = pci_device_to_OF_node(pdev);
@@ -1223,30 +1241,32 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
return -ENODEV;
}
- i = 0;
- while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0
- || pmac_ide[i].node != NULL))
- ++i;
- if (i >= MAX_HWIFS) {
+
+ pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
+ if (pmif == NULL)
+ return -ENOMEM;
+
+ hwif = ide_find_port();
+ if (hwif == NULL) {
printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n");
printk(KERN_ERR " %s\n", np->full_name);
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_free_pmif;
}
- pmif = &pmac_ide[i];
- hwif = &ide_hwifs[i];
-
if (pci_enable_device(pdev)) {
- printk(KERN_WARNING "ide%i: Can't enable PCI device for %s\n",
- i, np->full_name);
- return -ENXIO;
+ printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
+ "%s\n", np->full_name);
+ rc = -ENXIO;
+ goto out_free_pmif;
}
pci_set_master(pdev);
if (pci_request_regions(pdev, "Kauai ATA")) {
- printk(KERN_ERR "ide%d: Cannot obtain PCI resources for %s\n",
- i, np->full_name);
- return -ENXIO;
+ printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
+ "%s\n", np->full_name);
+ rc = -ENXIO;
+ goto out_free_pmif;
}
hwif->dev = &pdev->dev;
@@ -1276,11 +1296,15 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* The interface is released to the common IDE layer */
pci_set_drvdata(pdev, NULL);
iounmap(base);
- memset(pmif, 0, sizeof(*pmif));
pci_release_regions(pdev);
+ kfree(pmif);
}
return rc;
+
+out_free_pmif:
+ kfree(pmif);
+ return rc;
}
static int
@@ -1652,18 +1676,31 @@ pmac_ide_dma_lost_irq (ide_drive_t *drive)
printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
}
+static const struct ide_dma_ops pmac_dma_ops = {
+ .dma_host_set = pmac_ide_dma_host_set,
+ .dma_setup = pmac_ide_dma_setup,
+ .dma_exec_cmd = pmac_ide_dma_exec_cmd,
+ .dma_start = pmac_ide_dma_start,
+ .dma_end = pmac_ide_dma_end,
+ .dma_test_irq = pmac_ide_dma_test_irq,
+ .dma_timeout = ide_dma_timeout,
+ .dma_lost_irq = pmac_ide_dma_lost_irq,
+};
+
/*
* Allocate the data structures needed for using DMA with an interface
* and fill the proper list of functions pointers
*/
-static int __devinit pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
+static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
+ const struct ide_port_info *d)
{
+ pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
struct pci_dev *dev = to_pci_dev(hwif->dev);
/* We won't need pci_dev if we switch to generic consistent
* DMA routines ...
*/
- if (dev == NULL)
+ if (dev == NULL || pmif->dma_regs == 0)
return -ENODEV;
/*
* Allocate space for the DBDMA commands.
@@ -1682,18 +1719,14 @@ static int __devinit pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
hwif->sg_max_nents = MAX_DCMDS;
- hwif->dma_host_set = &pmac_ide_dma_host_set;
- hwif->dma_setup = &pmac_ide_dma_setup;
- hwif->dma_exec_cmd = &pmac_ide_dma_exec_cmd;
- hwif->dma_start = &pmac_ide_dma_start;
- hwif->ide_dma_end = &pmac_ide_dma_end;
- hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
- hwif->dma_timeout = &ide_dma_timeout;
- hwif->dma_lost_irq = &pmac_ide_dma_lost_irq;
-
return 0;
}
-
+#else
+static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
+ const struct ide_port_info *d)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
module_init(pmac_ide_probe);
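The pmac.c hunks above replace per-hwif function-pointer assignments (hwif->set_pio_mode, hwif->selectproc, the individual hwif->dma_* hooks) with shared const ide_port_ops/ide_dma_ops tables selected once at probe time. A sketch of the pattern, with hypothetical types standing in for ide_hwif_t and struct ide_port_ops:

#include <linux/types.h>

struct my_dev;

struct my_dev_ops {
	void (*set_pio_mode)(struct my_dev *dev, u8 pio);
	void (*selectproc)(struct my_dev *dev);
};

struct my_dev {
	const struct my_dev_ops *ops;
};

static void my_set_pio_mode(struct my_dev *dev, u8 pio) { }
static void my_selectproc(struct my_dev *dev) { }
static void my_kauai_selectproc(struct my_dev *dev) { }

/* one read-only table per controller flavour, shared by every instance */
static const struct my_dev_ops my_port_ops = {
	.set_pio_mode	= my_set_pio_mode,
	.selectproc	= my_selectproc,
};

static const struct my_dev_ops my_ata6_port_ops = {
	.set_pio_mode	= my_set_pio_mode,
	.selectproc	= my_kauai_selectproc,
};

static void my_probe_one(struct my_dev *dev, int is_ata6)
{
	/* pick the whole method table once at probe time instead of
	 * poking individual function pointers into each device */
	dev->ops = is_ata6 ? &my_ata6_port_ops : &my_port_ops;
}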
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index f7ede0e4288..5171601fb25 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -20,73 +20,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-
-/**
- * ide_match_hwif - match a PCI IDE against an ide_hwif
- * @io_base: I/O base of device
- * @bootable: set if its bootable
- * @name: name of device
- *
- * Match a PCI IDE port against an entry in ide_hwifs[],
- * based on io_base port if possible. Return the matching hwif,
- * or a new hwif. If we find an error (clashing, out of devices, etc)
- * return NULL
- *
- * FIXME: we need to handle mmio matches here too
- */
-
-static ide_hwif_t *ide_match_hwif(unsigned long io_base, u8 bootable, const char *name)
-{
- int h;
- ide_hwif_t *hwif;
-
- /*
- * Look for a hwif with matching io_base default value.
- * If chipset is "ide_unknown", then claim that hwif slot.
- * Otherwise, some other chipset has already claimed it.. :(
- */
- for (h = 0; h < MAX_HWIFS; ++h) {
- hwif = &ide_hwifs[h];
- if (hwif->io_ports[IDE_DATA_OFFSET] == io_base) {
- if (hwif->chipset == ide_unknown)
- return hwif; /* match */
- printk(KERN_ERR "%s: port 0x%04lx already claimed by %s\n",
- name, io_base, hwif->name);
- return NULL; /* already claimed */
- }
- }
- /*
- * Okay, there is no hwif matching our io_base,
- * so we'll just claim an unassigned slot.
- * Give preference to claiming other slots before claiming ide0/ide1,
- * just in case there's another interface yet-to-be-scanned
- * which uses ports 1f0/170 (the ide0/ide1 defaults).
- *
- * Unless there is a bootable card that does not use the standard
- * ports 1f0/170 (the ide0/ide1 defaults). The (bootable) flag.
- */
- if (bootable) {
- for (h = 0; h < MAX_HWIFS; ++h) {
- hwif = &ide_hwifs[h];
- if (hwif->chipset == ide_unknown)
- return hwif; /* pick an unused entry */
- }
- } else {
- for (h = 2; h < MAX_HWIFS; ++h) {
- hwif = ide_hwifs + h;
- if (hwif->chipset == ide_unknown)
- return hwif; /* pick an unused entry */
- }
- }
- for (h = 0; h < 2 && h < MAX_HWIFS; ++h) {
- hwif = ide_hwifs + h;
- if (hwif->chipset == ide_unknown)
- return hwif; /* pick an unused entry */
- }
- printk(KERN_ERR "%s: too many IDE interfaces, no room in table\n", name);
- return NULL;
-}
-
/**
* ide_setup_pci_baseregs - place a PCI IDE controller native
* @dev: PCI device of interface to switch native
@@ -94,13 +27,13 @@ static ide_hwif_t *ide_match_hwif(unsigned long io_base, u8 bootable, const char
*
* We attempt to place the PCI interface into PCI native mode. If
* we succeed the BARs are ok and the controller is in PCI mode.
- * Returns 0 on success or an errno code.
+ * Returns 0 on success or an errno code.
*
* FIXME: if we program the interface and then fail to set the BARS
* we don't switch it back to legacy mode. Do we actually care ??
*/
-
-static int ide_setup_pci_baseregs (struct pci_dev *dev, const char *name)
+
+static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
{
u8 progif = 0;
@@ -139,16 +72,16 @@ static void ide_pci_clear_simplex(unsigned long dma_base, const char *name)
}
/**
- * ide_get_or_set_dma_base - setup BMIBA
- * @d: IDE port info
+ * ide_pci_dma_base - setup BMIBA
* @hwif: IDE interface
+ * @d: IDE port info
*
* Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
* Where a device has a partner that is already in DMA mode we check
* and enforce IDE simplex rules.
*/
-static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_hwif_t *hwif)
+unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long dma_base = 0;
@@ -199,6 +132,31 @@ static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_
out:
return dma_base;
}
+EXPORT_SYMBOL_GPL(ide_pci_dma_base);
+
+/*
+ * Set up BM-DMA capability (PnP BIOS should have done this)
+ */
+int ide_pci_set_master(struct pci_dev *dev, const char *name)
+{
+ u16 pcicmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
+
+ if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
+ pci_set_master(dev);
+
+ if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
+ (pcicmd & PCI_COMMAND_MASTER) == 0) {
+ printk(KERN_ERR "%s: error updating PCICMD on %s\n",
+ name, pci_name(dev));
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_pci_set_master);
#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
@@ -207,7 +165,6 @@ void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
" PCI slot %s\n", d->name, dev->vendor, dev->device,
dev->revision, pci_name(dev));
}
-
EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
@@ -220,13 +177,13 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
* but if that fails then we only need IO space. The PCI code should
* have setup the proper resources for us already for controllers in
* legacy mode.
- *
+ *
* Returns zero on success or an error code
*/
static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
{
- int ret;
+ int ret, bars;
if (pci_enable_device(dev)) {
ret = pci_enable_device_io(dev);
@@ -249,13 +206,21 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
goto out;
}
- /* FIXME: Temporary - until we put in the hotplug interface logic
- Check that the bits we want are not in use by someone else. */
- ret = pci_request_region(dev, 4, "ide_tmp");
- if (ret < 0)
- goto out;
+ if (d->host_flags & IDE_HFLAG_SINGLE)
+ bars = (1 << 2) - 1;
+ else
+ bars = (1 << 4) - 1;
+
+ if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ if (d->host_flags & IDE_HFLAG_CS5520)
+ bars |= (1 << 2);
+ else
+ bars |= (1 << 4);
+ }
- pci_release_region(dev, 4);
+ ret = pci_request_selected_regions(dev, bars, d->name);
+ if (ret < 0)
+ printk(KERN_ERR "%s: can't reserve resources\n", d->name);
out:
return ret;
}
@@ -279,8 +244,8 @@ static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
* Maybe the user deliberately *disabled* the device,
* but we'll eventually ignore it again if no drives respond.
*/
- if (ide_setup_pci_baseregs(dev, d->name) || pci_write_config_word(dev, PCI_COMMAND, pcicmd|PCI_COMMAND_IO))
- {
+ if (ide_setup_pci_baseregs(dev, d->name) ||
+ pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
printk(KERN_INFO "%s: device disabled (BIOS)\n", d->name);
return -ENODEV;
}
@@ -301,26 +266,24 @@ static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
* @d: IDE port info
* @bar: BAR number
*
- * Checks if a BAR is configured and points to MMIO space. If so
- * print an error and return an error code. Otherwise return 0
+ * Checks if a BAR is configured and points to MMIO space. If so,
+ * return an error code. Otherwise return 0
*/
-static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d, int bar)
+static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
+ int bar)
{
ulong flags = pci_resource_flags(dev, bar);
-
+
/* Unconfigured ? */
if (!flags || pci_resource_len(dev, bar) == 0)
return 0;
- /* I/O space */
- if(flags & PCI_BASE_ADDRESS_IO_MASK)
+ /* I/O space */
+ if (flags & IORESOURCE_IO)
return 0;
-
+
/* Bad */
- printk(KERN_ERR "%s: IO baseregs (BIOS) are reported "
- "as MEM, report to "
- "<andre@linux-ide.org>.\n", d->name);
return -EINVAL;
}
@@ -344,14 +307,16 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
{
unsigned long ctl = 0, base = 0;
ide_hwif_t *hwif;
- u8 bootable = (d->host_flags & IDE_HFLAG_BOOTABLE) ? 1 : 0;
struct hw_regs_s hw;
if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
- /* Possibly we should fail if these checks report true */
- ide_pci_check_iomem(dev, d, 2*port);
- ide_pci_check_iomem(dev, d, 2*port+1);
-
+ if (ide_pci_check_iomem(dev, d, 2 * port) ||
+ ide_pci_check_iomem(dev, d, 2 * port + 1)) {
+ printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported "
+ "as MEM for port %d!\n", d->name, port);
+ return NULL;
+ }
+
ctl = pci_resource_start(dev, 2*port+1);
base = pci_resource_start(dev, 2*port);
if ((ctl && !base) || (base && !ctl)) {
@@ -360,14 +325,18 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
return NULL;
}
}
- if (!ctl)
- {
+ if (!ctl) {
/* Use default values */
ctl = port ? 0x374 : 0x3f4;
base = port ? 0x170 : 0x1f0;
}
- if ((hwif = ide_match_hwif(base, bootable, d->name)) == NULL)
- return NULL; /* no room in ide_hwifs[] */
+
+ hwif = ide_find_port_slot(d);
+ if (hwif == NULL) {
+ printk(KERN_ERR "%s: too many IDE interfaces, no room in "
+ "table\n", d->name);
+ return NULL;
+ }
memset(&hw, 0, sizeof(hw));
hw.irq = irq;
@@ -378,7 +347,6 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
ide_init_port_hw(hwif, &hw);
hwif->dev = &dev->dev;
- hwif->cds = d;
return hwif;
}
@@ -394,40 +362,33 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
* state
*/
-void ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
+int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
- u16 pcicmd;
-
- pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
(dev->class & 0x80))) {
- unsigned long dma_base = ide_get_or_set_dma_base(d, hwif);
- if (dma_base && !(pcicmd & PCI_COMMAND_MASTER)) {
- /*
- * Set up BM-DMA capability
- * (PnP BIOS should have done this)
- */
- pci_set_master(dev);
- if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) || !(pcicmd & PCI_COMMAND_MASTER)) {
- printk(KERN_ERR "%s: %s error updating PCICMD\n",
- hwif->name, d->name);
- dma_base = 0;
- }
- }
- if (dma_base) {
- if (d->init_dma) {
- d->init_dma(hwif, dma_base);
- } else {
- ide_setup_dma(hwif, dma_base);
- }
- } else {
- printk(KERN_INFO "%s: %s Bus-Master DMA disabled "
- "(BIOS)\n", hwif->name, d->name);
- }
+ unsigned long base = ide_pci_dma_base(hwif, d);
+
+ if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+ return -1;
+
+ if (hwif->mmio)
+ printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
+ else
+ printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
+ hwif->name, base, base + 7);
+
+ hwif->extra_base = base + (hwif->channel ? 8 : 16);
+
+ if (ide_allocate_dma_engine(hwif))
+ return -1;
+
+ ide_setup_dma(hwif, base);
}
+
+ return 0;
}
#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
@@ -514,7 +475,6 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
*(idx + port) = hwif->index;
}
}
-
EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
/*
@@ -597,7 +557,6 @@ int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d)
return ret;
}
-
EXPORT_SYMBOL_GPL(ide_setup_pci_device);
int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
@@ -621,5 +580,4 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
out:
return ret;
}
-
EXPORT_SYMBOL_GPL(ide_setup_pci_devices);
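The ide_pci_enable() hunk above drops the temporary pci_request_region(dev, 4, "ide_tmp") probe in favour of computing a BAR bitmask — BARs 0-1 for a single-port controller, 0-3 otherwise, plus the bus-master BAR when DMA is possible — and holding those regions with pci_request_selected_regions(). A sketch under the same flag semantics, with hypothetical flag names:

#include <linux/pci.h>

/* hypothetical host flags mirroring IDE_HFLAG_SINGLE / _NO_DMA / _CS5520 */
#define MY_HFLAG_SINGLE	(1 << 0)
#define MY_HFLAG_NO_DMA	(1 << 1)
#define MY_HFLAG_CS5520	(1 << 2)

static int my_request_bars(struct pci_dev *dev, unsigned int host_flags,
			   const char *name)
{
	int bars;

	/* command/control BARs: 0-1 for one channel, 0-3 for two */
	if (host_flags & MY_HFLAG_SINGLE)
		bars = (1 << 2) - 1;
	else
		bars = (1 << 4) - 1;

	/* add the bus-master DMA BAR unless DMA is disabled;
	 * CS5520-style chips keep it at BAR 2 instead of BAR 4 */
	if ((host_flags & MY_HFLAG_NO_DMA) == 0) {
		if (host_flags & MY_HFLAG_CS5520)
			bars |= (1 << 2);
		else
			bars |= (1 << 4);
	}

	return pci_request_selected_regions(dev, bars, name);
}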
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0d13fe0a260..3d6d9461c31 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -160,6 +160,7 @@ struct ehca_qp {
};
u32 qp_type;
enum ehca_ext_qp_type ext_type;
+ enum ib_qp_state state;
struct ipz_queue ipz_squeue;
struct ipz_queue ipz_rqueue;
struct h_galpas galpas;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index b5ca94c6b8d..ca5eb0cb628 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -633,7 +633,7 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
unsigned long flags;
WARN_ON_ONCE(!in_interrupt());
- if (ehca_debug_level)
+ if (ehca_debug_level >= 3)
ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
spin_lock_irqsave(&pool->last_cpu_lock, flags);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 65b3362cdb9..65048976198 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -50,7 +50,7 @@
#include "ehca_tools.h"
#include "hcp_if.h"
-#define HCAD_VERSION "0025"
+#define HCAD_VERSION "0026"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -60,7 +60,6 @@ MODULE_VERSION(HCAD_VERSION);
static int ehca_open_aqp1 = 0;
static int ehca_hw_level = 0;
static int ehca_poll_all_eqs = 1;
-static int ehca_mr_largepage = 1;
int ehca_debug_level = 0;
int ehca_nr_ports = 2;
@@ -70,45 +69,40 @@ int ehca_static_rate = -1;
int ehca_scaling_code = 0;
int ehca_lock_hcalls = -1;
-module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO);
-module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
-module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
-module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
-module_param_named(use_hp_mr, ehca_use_hp_mr, int, S_IRUGO);
-module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
-module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, S_IRUGO);
-module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
-module_param_named(scaling_code, ehca_scaling_code, int, S_IRUGO);
-module_param_named(mr_largepage, ehca_mr_largepage, int, S_IRUGO);
+module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
+module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
+module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
+module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
+module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO);
+module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
+module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
+module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
+module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO);
MODULE_PARM_DESC(open_aqp1,
- "AQP1 on startup (0: no (default), 1: yes)");
+ "Open AQP1 on startup (default: no)");
MODULE_PARM_DESC(debug_level,
- "debug level"
- " (0: no debug traces (default), 1: with debug traces)");
+ "Amount of debug output (0: none (default), 1: traces, "
+ "2: some dumps, 3: lots)");
MODULE_PARM_DESC(hw_level,
- "hardware level"
- " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
+ "Hardware level (0: autosensing (default), "
+ "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
MODULE_PARM_DESC(nr_ports,
"number of connected ports (-1: autodetect, 1: port one only, "
"2: two ports (default)");
MODULE_PARM_DESC(use_hp_mr,
- "high performance MRs (0: no (default), 1: yes)");
+ "Use high performance MRs (default: no)");
MODULE_PARM_DESC(port_act_time,
- "time to wait for port activation (default: 30 sec)");
+ "Time to wait for port activation (default: 30 sec)");
MODULE_PARM_DESC(poll_all_eqs,
- "polls all event queues periodically"
- " (0: no, 1: yes (default))");
+ "Poll all event queues periodically (default: yes)");
MODULE_PARM_DESC(static_rate,
- "set permanent static rate (default: disabled)");
+ "Set permanent static rate (default: no static rate)");
MODULE_PARM_DESC(scaling_code,
- "set scaling code (0: disabled/default, 1: enabled)");
-MODULE_PARM_DESC(mr_largepage,
- "use large page for MR (0: use PAGE_SIZE (default), "
- "1: use large page depending on MR size");
+ "Enable scaling code (default: no)");
MODULE_PARM_DESC(lock_hcalls,
- "serialize all hCalls made by the driver "
+ "Serialize all hCalls made by the driver "
"(default: autodetect)");
DEFINE_RWLOCK(ehca_qp_idr_lock);
@@ -275,6 +269,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
u64 h_ret;
struct hipz_query_hca *rblock;
struct hipz_query_port *port;
+ const char *loc_code;
static const u32 pgsize_map[] = {
HCA_CAP_MR_PGSIZE_4K, 0x1000,
@@ -283,6 +278,12 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
HCA_CAP_MR_PGSIZE_16M, 0x1000000,
};
+ ehca_gen_dbg("Probing adapter %s...",
+ shca->ofdev->node->full_name);
+ loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
+ if (loc_code)
+ ehca_gen_dbg(" ... location lode=%s", loc_code);
+
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_gen_err("Cannot allocate rblock memory.");
@@ -350,11 +351,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
/* translate supported MR page sizes; always support 4K */
shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
- if (ehca_mr_largepage) { /* support extra sizes only if enabled */
- for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
- if (rblock->memory_page_size_supported & pgsize_map[i])
- shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
- }
+ for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
+ if (rblock->memory_page_size_supported & pgsize_map[i])
+ shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
/* query max MTU from first port -- it's the same for all ports */
port = (struct hipz_query_port *)rblock;
@@ -567,8 +566,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- ehca_debug_level);
+ return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
}
static ssize_t ehca_store_debug_level(struct device_driver *ddp,
@@ -657,14 +655,6 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
}
static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
-static ssize_t ehca_show_mr_largepage(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", ehca_mr_largepage);
-}
-static DEVICE_ATTR(mr_largepage, S_IRUGO, ehca_show_mr_largepage, NULL);
-
static struct attribute *ehca_dev_attrs[] = {
&dev_attr_adapter_handle.attr,
&dev_attr_num_ports.attr,
@@ -681,7 +671,6 @@ static struct attribute *ehca_dev_attrs[] = {
&dev_attr_cur_mw.attr,
&dev_attr_max_pd.attr,
&dev_attr_max_ah.attr,
- &dev_attr_mr_largepage.attr,
NULL
};
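The ehca_main.c hunks above retype the yes/no parameters as bool and tidy their descriptions. A minimal sketch of the same module-parameter style, assuming hypothetical knob names (int-backed bool parameters were accepted in kernels of this era):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

static int my_debug_level;	/* 0: quiet, higher: chattier */
static int my_use_feature;	/* yes/no knob, exposed as bool */

module_param_named(debug_level, my_debug_level, int, S_IRUGO);
module_param_named(use_feature, my_use_feature, bool, S_IRUGO);

MODULE_PARM_DESC(debug_level,
		 "Amount of debug output (0: none (default), higher: more)");
MODULE_PARM_DESC(use_feature,
		 "Use the feature (default: no)");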
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index f26997fc00f..46ae4eb2c4e 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1794,8 +1794,9 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
int t;
for (t = start_idx; t <= end_idx; t++) {
u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
- ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
- *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
+ if (ehca_debug_level >= 3)
+ ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+ *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
ehca_gen_err("uncontiguous page found pgaddr=%lx "
"prev_pgaddr=%lx page_list_i=%x",
@@ -1862,10 +1863,13 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
pgaddr &
~(pginfo->hwpage_size - 1));
}
- ehca_gen_dbg("kpage=%lx chunk_page=%lx "
- "value=%016lx", *kpage, pgaddr,
- *(u64 *)abs_to_virt(
- phys_to_abs(pgaddr)));
+ if (ehca_debug_level >= 3) {
+ u64 val = *(u64 *)abs_to_virt(
+ phys_to_abs(pgaddr));
+ ehca_gen_dbg("kpage=%lx chunk_page=%lx "
+ "value=%016lx",
+ *kpage, pgaddr, val);
+ }
prev_pgaddr = pgaddr;
i++;
pginfo->kpage_cnt++;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 3eb14a52cbf..57bef1152cc 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -550,6 +550,7 @@ static struct ehca_qp *internal_create_qp(
spin_lock_init(&my_qp->spinlock_r);
my_qp->qp_type = qp_type;
my_qp->ext_type = parms.ext_type;
+ my_qp->state = IB_QPS_RESET;
if (init_attr->recv_cq)
my_qp->recv_cq =
@@ -965,7 +966,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
qp_num, bad_send_wqe_p);
/* convert wqe pointer to vadr */
bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
squeue = &my_qp->ipz_squeue;
if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
@@ -978,7 +979,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = 0;
while (wqe->optype != 0xff && wqe->wqef != 0xff) {
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
wqe->nr_of_data_seg = 0; /* suppress data access */
wqe->wqef = WQEF_PURGE; /* WQE to be purged */
@@ -1450,7 +1451,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
/* no support for max_send/recv_sge yet */
}
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
@@ -1508,6 +1509,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_QKEY)
my_qp->qkey = attr->qkey;
+ my_qp->state = qp_new_state;
+
modify_qp_exit2:
if (squeue_locked) { /* this means: sqe -> rts */
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1763,7 +1766,7 @@ int ehca_query_qp(struct ib_qp *qp,
if (qp_init_attr)
*qp_init_attr = my_qp->init_attr;
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
query_qp_exit1:
@@ -1811,7 +1814,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
goto modify_srq_exit0;
}
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
@@ -1864,7 +1867,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
srq_attr->srq_limit = EHCA_BMASK_GET(
MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
query_srq_exit1:
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index a20bbf46618..bbe0436f4f7 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -81,7 +81,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
recv_wr->sg_list[cnt_ds].length;
}
- if (ehca_debug_level) {
+ if (ehca_debug_level >= 3) {
ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
ipz_rqueue);
ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
@@ -281,7 +281,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
return -EINVAL;
}
- if (ehca_debug_level) {
+ if (ehca_debug_level >= 3) {
ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
}
@@ -421,6 +421,11 @@ int ehca_post_send(struct ib_qp *qp,
int ret = 0;
unsigned long flags;
+ if (unlikely(my_qp->state != IB_QPS_RTS)) {
+ ehca_err(qp->device, "QP not in RTS state qpn=%x", qp->qp_num);
+ return -EINVAL;
+ }
+
/* LOCK the QUEUE */
spin_lock_irqsave(&my_qp->spinlock_s, flags);
@@ -454,13 +459,14 @@ int ehca_post_send(struct ib_qp *qp,
goto post_send_exit0;
}
wqe_cnt++;
- ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
- my_qp, qp->qp_num, wqe_cnt);
} /* eof for cur_send_wr */
post_send_exit0:
iosync(); /* serialize GAL register access */
hipz_update_sqa(my_qp, wqe_cnt);
+ if (unlikely(ret || ehca_debug_level >= 2))
+ ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
+ my_qp, qp->qp_num, wqe_cnt, ret);
my_qp->message_count += wqe_cnt;
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
return ret;
@@ -520,13 +526,14 @@ static int internal_post_recv(struct ehca_qp *my_qp,
goto post_recv_exit0;
}
wqe_cnt++;
- ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
- my_qp, my_qp->real_qp_num, wqe_cnt);
} /* eof for cur_recv_wr */
post_recv_exit0:
iosync(); /* serialize GAL register access */
hipz_update_rqa(my_qp, wqe_cnt);
+ if (unlikely(ret || ehca_debug_level >= 2))
+ ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
+ my_qp, my_qp->real_qp_num, wqe_cnt, ret);
spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
return ret;
}
@@ -570,16 +577,17 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
struct ehca_cqe *cqe;
struct ehca_qp *my_qp;
- int cqe_count = 0;
+ int cqe_count = 0, is_error;
poll_cq_one_read_cqe:
cqe = (struct ehca_cqe *)
ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
if (!cqe) {
ret = -EAGAIN;
- ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
- "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret);
- goto poll_cq_one_exit0;
+ if (ehca_debug_level >= 3)
+ ehca_dbg(cq->device, "Completion queue is empty "
+ "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
+ goto poll_cq_one_exit0;
}
/* prevents loads being reordered across this point */
@@ -609,7 +617,7 @@ poll_cq_one_read_cqe:
ehca_dbg(cq->device,
"Got CQE with purged bit qp_num=%x src_qp=%x",
cqe->local_qp_number, cqe->remote_qp_number);
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
cqe->local_qp_number,
cqe->remote_qp_number);
@@ -622,11 +630,13 @@ poll_cq_one_read_cqe:
}
}
- /* tracing cqe */
- if (unlikely(ehca_debug_level)) {
+ is_error = cqe->status & WC_STATUS_ERROR_BIT;
+
+ /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
+ if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
ehca_dbg(cq->device,
- "Received COMPLETION ehca_cq=%p cq_num=%x -----",
- my_cq, my_cq->cq_number);
+ "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
+ is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
my_cq, my_cq->cq_number);
ehca_dbg(cq->device,
@@ -649,8 +659,9 @@ poll_cq_one_read_cqe:
/* update also queue adder to throw away this entry!!! */
goto poll_cq_one_exit0;
}
+
/* eval ib_wc_status */
- if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
+ if (unlikely(is_error)) {
/* complete with errors */
map_ib_wc_status(cqe->status, &wc->status);
wc->vendor_err = wc->status;
@@ -671,14 +682,6 @@ poll_cq_one_read_cqe:
wc->imm_data = cpu_to_be32(cqe->immediate_data);
wc->sl = cqe->service_level;
- if (unlikely(wc->status != IB_WC_SUCCESS))
- ehca_dbg(cq->device,
- "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
- "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
- "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
- cqe->status, cqe->local_qp_number,
- cqe->remote_qp_number, cqe->work_request_id, cqe);
-
poll_cq_one_exit0:
if (cqe_count > 0)
hipz_update_feca(my_cq, cqe_count);
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 1b07f2beafa..e43ed8f8a0c 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -211,8 +211,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
break;
case 1: /* qp rqueue_addr */
- ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
- qp->ib_qp.qp_num);
+ ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
&qp->mm_count_rqueue);
if (unlikely(ret)) {
@@ -224,8 +223,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
break;
case 2: /* qp squeue_addr */
- ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
- qp->ib_qp.qp_num);
+ ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
&qp->mm_count_squeue);
if (unlikely(ret)) {
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 7029aa65375..5245e13c3a3 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -123,8 +123,9 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
int i, sleep_msecs;
unsigned long flags = 0;
- ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
- opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ if (unlikely(ehca_debug_level >= 2))
+ ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
+ opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
for (i = 0; i < 5; i++) {
/* serialize hCalls to work around firmware issue */
@@ -148,7 +149,8 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
opcode, ret, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
else
- ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
+ if (unlikely(ehca_debug_level >= 2))
+ ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
return ret;
}
@@ -172,8 +174,10 @@ static long ehca_plpar_hcall9(unsigned long opcode,
int i, sleep_msecs;
unsigned long flags = 0;
- ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
- arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ if (unlikely(ehca_debug_level >= 2))
+ ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9);
for (i = 0; i < 5; i++) {
/* serialize hCalls to work around firmware issue */
@@ -201,7 +205,7 @@ static long ehca_plpar_hcall9(unsigned long opcode,
ret, outs[0], outs[1], outs[2], outs[3],
outs[4], outs[5], outs[6], outs[7],
outs[8]);
- } else
+ } else if (unlikely(ehca_debug_level >= 2))
ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
ret, outs[0], outs[1], outs[2], outs[3],
outs[4], outs[5], outs[6], outs[7],
@@ -381,7 +385,7 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
r_cb, /* r6 */
0, 0, 0, 0);
- if (ehca_debug_level)
+ if (ehca_debug_level >= 2)
ehca_dmp(query_port_response_block, 64, "response_block");
return ret;
@@ -731,9 +735,6 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
- ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x "
- "vaddr=%lx length=%lx",
- (u32)PAGE_SIZE, access_ctrl, vaddr, length);
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
5, /* r5 */
@@ -758,7 +759,7 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
{
u64 ret;
- if (unlikely(ehca_debug_level >= 2)) {
+ if (unlikely(ehca_debug_level >= 3)) {
if (count > 1) {
u64 *kpage;
int i;
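The hcp_if.c hunks above stop tracing every hCall unconditionally and instead gate the trace behind ehca_debug_level >= 2 inside unlikely(), so the common quiet path costs a single predicted branch. A sketch with a hypothetical my_dbg() helper:

#include <linux/kernel.h>
#include <linux/compiler.h>

static int my_debug_level;	/* hypothetical runtime knob */

/* trace only when asked to; unlikely() keeps the quiet path straight-line */
#define my_dbg(level, fmt, args...)				\
	do {							\
		if (unlikely(my_debug_level >= (level)))	\
			printk(KERN_DEBUG fmt "\n", ##args);	\
	} while (0)

static long my_hcall(unsigned long opcode)
{
	my_dbg(2, "opcode=%lx", opcode);
	/* ... issue the hypervisor call ... */
	return 0;
}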
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 3557e7edc9b..5e570bb0bb6 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -204,7 +204,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
uar = &to_mucontext(context)->uar;
} else {
- err = mlx4_ib_db_alloc(dev, &cq->db, 1);
+ err = mlx4_db_alloc(dev->dev, &cq->db, 1);
if (err)
goto err_cq;
@@ -250,7 +250,7 @@ err_mtt:
err_db:
if (!context)
- mlx4_ib_db_free(dev, &cq->db);
+ mlx4_db_free(dev->dev, &cq->db);
err_cq:
kfree(cq);
@@ -435,7 +435,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
ib_umem_release(mcq->umem);
} else {
mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
- mlx4_ib_db_free(dev, &mcq->db);
+ mlx4_db_free(dev->dev, &mcq->db);
}
kfree(mcq);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 1c36087aef1..8e342cc9bae 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -34,124 +34,6 @@
#include "mlx4_ib.h"
-struct mlx4_ib_db_pgdir {
- struct list_head list;
- DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
- DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
- unsigned long *bits[2];
- __be32 *db_page;
- dma_addr_t db_dma;
-};
-
-static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
-{
- struct mlx4_ib_db_pgdir *pgdir;
-
- pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
- if (!pgdir)
- return NULL;
-
- bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
- pgdir->bits[0] = pgdir->order0;
- pgdir->bits[1] = pgdir->order1;
- pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
- PAGE_SIZE, &pgdir->db_dma,
- GFP_KERNEL);
- if (!pgdir->db_page) {
- kfree(pgdir);
- return NULL;
- }
-
- return pgdir;
-}
-
-static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
- struct mlx4_ib_db *db, int order)
-{
- int o;
- int i;
-
- for (o = order; o <= 1; ++o) {
- i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
- if (i < MLX4_IB_DB_PER_PAGE >> o)
- goto found;
- }
-
- return -ENOMEM;
-
-found:
- clear_bit(i, pgdir->bits[o]);
-
- i <<= o;
-
- if (o > order)
- set_bit(i ^ 1, pgdir->bits[order]);
-
- db->u.pgdir = pgdir;
- db->index = i;
- db->db = pgdir->db_page + db->index;
- db->dma = pgdir->db_dma + db->index * 4;
- db->order = order;
-
- return 0;
-}
-
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
-{
- struct mlx4_ib_db_pgdir *pgdir;
- int ret = 0;
-
- mutex_lock(&dev->pgdir_mutex);
-
- list_for_each_entry(pgdir, &dev->pgdir_list, list)
- if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
- goto out;
-
- pgdir = mlx4_ib_alloc_db_pgdir(dev);
- if (!pgdir) {
- ret = -ENOMEM;
- goto out;
- }
-
- list_add(&pgdir->list, &dev->pgdir_list);
-
- /* This should never fail -- we just allocated an empty page: */
- WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
-
-out:
- mutex_unlock(&dev->pgdir_mutex);
-
- return ret;
-}
-
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
-{
- int o;
- int i;
-
- mutex_lock(&dev->pgdir_mutex);
-
- o = db->order;
- i = db->index;
-
- if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
- clear_bit(i ^ 1, db->u.pgdir->order0);
- ++o;
- }
-
- i >>= o;
- set_bit(i, db->u.pgdir->bits[o]);
-
- if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
- dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
- db->u.pgdir->db_page, db->u.pgdir->db_dma);
- list_del(&db->u.pgdir->list);
- kfree(db->u.pgdir);
- }
-
- mutex_unlock(&dev->pgdir_mutex);
-}
-
struct mlx4_ib_user_db_page {
struct list_head list;
struct ib_umem *umem;
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page {
};
int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
- struct mlx4_ib_db *db)
+ struct mlx4_db *db)
{
struct mlx4_ib_user_db_page *page;
struct ib_umem_chunk *chunk;
@@ -202,7 +84,7 @@ out:
return err;
}
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
{
mutex_lock(&context->db_page_mutex);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4d9b5ac4220..4d61e32866c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -557,9 +557,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_uar;
MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
- INIT_LIST_HEAD(&ibdev->pgdir_list);
- mutex_init(&ibdev->pgdir_mutex);
-
ibdev->dev = dev;
strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 9e637323c15..5cf994794d2 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -43,24 +43,6 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
-enum {
- MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4
-};
-
-struct mlx4_ib_db_pgdir;
-struct mlx4_ib_user_db_page;
-
-struct mlx4_ib_db {
- __be32 *db;
- union {
- struct mlx4_ib_db_pgdir *pgdir;
- struct mlx4_ib_user_db_page *user_page;
- } u;
- dma_addr_t dma;
- int index;
- int order;
-};
-
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
@@ -88,7 +70,7 @@ struct mlx4_ib_cq {
struct mlx4_cq mcq;
struct mlx4_ib_cq_buf buf;
struct mlx4_ib_cq_resize *resize_buf;
- struct mlx4_ib_db db;
+ struct mlx4_db db;
spinlock_t lock;
struct mutex resize_mutex;
struct ib_umem *umem;
@@ -127,7 +109,7 @@ struct mlx4_ib_qp {
struct mlx4_qp mqp;
struct mlx4_buf buf;
- struct mlx4_ib_db db;
+ struct mlx4_db db;
struct mlx4_ib_wq rq;
u32 doorbell_qpn;
@@ -154,7 +136,7 @@ struct mlx4_ib_srq {
struct ib_srq ibsrq;
struct mlx4_srq msrq;
struct mlx4_buf buf;
- struct mlx4_ib_db db;
+ struct mlx4_db db;
u64 *wrid;
spinlock_t lock;
int head;
@@ -175,9 +157,6 @@ struct mlx4_ib_dev {
struct mlx4_dev *dev;
void __iomem *uar_map;
- struct list_head pgdir_list;
- struct mutex pgdir_mutex;
-
struct mlx4_uar priv_uar;
u32 priv_pdn;
MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
@@ -248,11 +227,9 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
return container_of(ibah, struct mlx4_ib_ah, ibah);
}
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order);
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db);
int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
- struct mlx4_ib_db *db);
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db);
+ struct mlx4_db *db);
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b75efae7e44..80ea8b9e776 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -514,7 +514,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
if (!init_attr->srq) {
- err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+ err = mlx4_db_alloc(dev->dev, &qp->db, 0);
if (err)
goto err;
@@ -580,7 +580,7 @@ err_buf:
err_db:
if (!pd->uobject && !init_attr->srq)
- mlx4_ib_db_free(dev, &qp->db);
+ mlx4_db_free(dev->dev, &qp->db);
err:
return err;
@@ -666,7 +666,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
kfree(qp->rq.wrid);
mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
if (!qp->ibqp.srq)
- mlx4_ib_db_free(dev, &qp->db);
+ mlx4_db_free(dev->dev, &qp->db);
}
}
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index beaa3b06cf5..204619702f9 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -129,7 +129,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
} else {
- err = mlx4_ib_db_alloc(dev, &srq->db, 0);
+ err = mlx4_db_alloc(dev->dev, &srq->db, 0);
if (err)
goto err_srq;
@@ -200,7 +200,7 @@ err_buf:
err_db:
if (!pd->uobject)
- mlx4_ib_db_free(dev, &srq->db);
+ mlx4_db_free(dev->dev, &srq->db);
err_srq:
kfree(srq);
@@ -267,7 +267,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
kfree(msrq->wrid);
mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
&msrq->buf);
- mlx4_ib_db_free(dev, &msrq->db);
+ mlx4_db_free(dev->dev, &msrq->db);
}
kfree(msrq);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b046262ed63..a4e9269a29b 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -139,8 +139,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
addr = ntohl(ifa->ifa_address);
mask = ntohl(ifa->ifa_mask);
- nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n",
- addr, mask);
+ nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address " NIPQUAD_FMT
+ ", netmask " NIPQUAD_FMT ".\n",
+ HIPQUAD(addr), HIPQUAD(mask));
list_for_each_entry(nesdev, &nes_dev_list, list) {
nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
nesdev, nesdev->netdev[0]->name);
@@ -353,13 +354,11 @@ struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
*/
static void nes_print_macaddr(struct net_device *netdev)
{
- nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n",
- netdev->name,
- netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
- netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
- netdev->irq);
-}
+ DECLARE_MAC_BUF(mac);
+ nes_debug(NES_DBG_INIT, "%s: %s, IRQ %u\n",
+ netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq);
+}
/**
* nes_interrupt - handle interrupts
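The nes hunks in this and the following files replace hand-rolled "%02X" chains and raw hex IP prints with the DECLARE_MAC_BUF()/print_mac() helpers and the NIPQUAD_FMT/HIPQUAD() macros of this kernel era. A sketch, assuming a hypothetical my_dump_peer() helper:

#include <linux/kernel.h>
#include <linux/if_ether.h>	/* DECLARE_MAC_BUF(), print_mac() */

static void my_dump_peer(const unsigned char *hwaddr, u32 ip_host_order)
{
	DECLARE_MAC_BUF(mac);	/* char mac[18] scratch buffer */

	/* print_mac() formats aa:bb:cc:dd:ee:ff into the buffer;
	 * HIPQUAD() expands a host-order IPv4 address for NIPQUAD_FMT */
	printk(KERN_DEBUG "peer %s at " NIPQUAD_FMT "\n",
	       print_mac(mac, hwaddr), HIPQUAD(ip_host_order));
}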
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index d0738623bcf..d940fc27129 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -852,8 +852,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
/* get a handle on the hte */
hte = &cm_core->connected_nodes;
- nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n",
- loc_addr, loc_port, cm_core, hte);
+ nes_debug(NES_DBG_CM, "Searching for an owner node: " NIPQUAD_FMT ":%x from core %p->%p\n",
+ HIPQUAD(loc_addr), loc_port, cm_core, hte);
/* walk list and find cm_node associated with this session ID */
spin_lock_irqsave(&cm_core->ht_lock, flags);
@@ -902,8 +902,8 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
}
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n",
- dst_addr, dst_port);
+ nes_debug(NES_DBG_CM, "Unable to find listener for " NIPQUAD_FMT ":%x\n",
+ HIPQUAD(dst_addr), dst_port);
/* no listener */
return NULL;
@@ -1054,6 +1054,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
int arpindex = 0;
struct nes_device *nesdev;
struct nes_adapter *nesadapter;
+ DECLARE_MAC_BUF(mac);
/* create an hte and cm_node for this instance */
cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
@@ -1066,8 +1067,9 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->loc_port = cm_info->loc_port;
cm_node->rem_port = cm_info->rem_port;
cm_node->send_write0 = send_first;
- nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n",
- cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port);
+ nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n",
+ HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
+ HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
cm_node->listener = listener;
cm_node->netdev = nesvnic->netdev;
cm_node->cm_id = cm_info->cm_id;
@@ -1116,11 +1118,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
/* copy the mac addr to node context */
memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
- nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x,"
- " %02x, %02x, %02x, %02x, %02x\n",
- cm_node->rem_mac[0], cm_node->rem_mac[1],
- cm_node->rem_mac[2], cm_node->rem_mac[3],
- cm_node->rem_mac[4], cm_node->rem_mac[5]);
+ nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %s\n",
+ print_mac(mac, cm_node->rem_mac));
add_hte_node(cm_core, cm_node);
atomic_inc(&cm_nodes_created);
@@ -1850,8 +1849,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvni
nfo.rem_addr = ntohl(iph->saddr);
nfo.rem_port = ntohs(tcph->source);
- nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n",
- iph->daddr, tcph->dest, iph->saddr, tcph->source);
+ nes_debug(NES_DBG_CM, "Received packet: dest=" NIPQUAD_FMT
+ ":0x%04X src=" NIPQUAD_FMT ":0x%04X\n",
+ NIPQUAD(iph->daddr), tcph->dest,
+ NIPQUAD(iph->saddr), tcph->source);
/* note: this call is going to increment cm_node ref count */
cm_node = find_node(cm_core,
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index aa53aab91bf..08964cc7e98 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -636,6 +636,15 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
return 0;
}
+
+ i = 0;
+ while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
+ mdelay(1);
+ if (i >= 10000) {
+ printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
+ nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
+ return 0;
+ }
}
/* port reset */
@@ -684,17 +693,6 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
}
}
-
-
- i = 0;
- while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
- mdelay(1);
- if (i >= 10000) {
- printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
- nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
- return 0;
- }
-
return port_count;
}
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index b7e2844f096..8f36e231bdf 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -905,7 +905,7 @@ struct nes_hw_qp {
};
struct nes_hw_cq {
- struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */
+ struct nes_hw_cqe *cq_vbase; /* PCI memory for host rings */
void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
dma_addr_t cq_pbase; /* PCI memory for host rings */
u16 cq_head;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 01cd0effc49..e5366b013c1 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -787,16 +787,14 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
int i;
u32 macaddr_low;
u16 macaddr_high;
+ DECLARE_MAC_BUF(mac);
if (!is_valid_ether_addr(mac_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
- printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
- __func__, netdev->addr_len,
- mac_addr->sa_data[0], mac_addr->sa_data[1],
- mac_addr->sa_data[2], mac_addr->sa_data[3],
- mac_addr->sa_data[4], mac_addr->sa_data[5]);
+ printk(PFX "%s: Address length = %d, Address = %s\n",
+ __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data));
macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
macaddr_high += (u16)netdev->dev_addr[1];
macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
@@ -878,11 +876,11 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
if (mc_nic_index < 0)
mc_nic_index = nesvnic->nic_index;
if (multicast_addr) {
- nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n",
- multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1],
- multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3],
- multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5],
- perfect_filter_register_address+(mc_index * 8), mc_nic_index);
+ DECLARE_MAC_BUF(mac);
+ nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n",
+ print_mac(mac, multicast_addr->dmi_addr),
+ perfect_filter_register_address+(mc_index * 8),
+ mc_nic_index);
macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
macaddr_high += (u16)multicast_addr->dmi_addr[1];
macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index f9db07c2717..c6d5631a699 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -660,7 +660,9 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
/* DELETE or RESOLVE */
if (arp_index == nesadapter->arp_table_size) {
- nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n");
+ nes_debug(NES_DBG_NETDEV, "MAC for " NIPQUAD_FMT " not in ARP table - cannot %s\n",
+ HIPQUAD(ip_addr),
+ action == NES_ARP_RESOLVE ? "resolve" : "delete");
return -1;
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index f9a5d439089..ee74f7c7a6d 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1976,7 +1976,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
if (nescq->cq_mem_size)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
- (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
+ nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
kfree(nescq);
return ret;
@@ -3610,6 +3610,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
while (cqe_count < num_entries) {
if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
NES_CQE_VALID) {
+ /*
+ * Make sure we read CQ entry contents *after*
+ * we've checked the valid bit.
+ */
+ rmb();
+
cqe = nescq->hw_cq.cq_vbase[head];
nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 73b2b176ad0..f1f142dc64b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -56,11 +56,11 @@
/* constants */
enum {
- IPOIB_PACKET_SIZE = 2048,
- IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
IPOIB_ENCAP_LEN = 4,
+ IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+	IPOIB_UD_RX_SG = 2, /* max buffers needed for 4K MTU */
+
IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -139,7 +139,7 @@ struct ipoib_mcast {
struct ipoib_rx_buf {
struct sk_buff *skb;
- u64 mapping;
+ u64 mapping[IPOIB_UD_RX_SG];
};
struct ipoib_tx_buf {
@@ -294,6 +294,7 @@ struct ipoib_dev_priv {
unsigned int admin_mtu;
unsigned int mcast_mtu;
+ unsigned int max_ib_mtu;
struct ipoib_rx_buf *rx_ring;
@@ -305,6 +306,9 @@ struct ipoib_dev_priv {
struct ib_send_wr tx_wr;
unsigned tx_outstanding;
+ struct ib_recv_wr rx_wr;
+ struct ib_sge rx_sge[IPOIB_UD_RX_SG];
+
struct ib_wc ibwc[IPOIB_NUM_WC];
struct list_head dead_ahs;
@@ -366,6 +370,14 @@ struct ipoib_neigh {
struct list_head list;
};
+#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
+
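+/*
+ * With a 4K IB MTU, IPOIB_UD_BUF_SIZE() exceeds PAGE_SIZE, so a receive
+ * buffer is split into a linear head of IPOIB_UD_HEAD_SIZE bytes plus
+ * one page fragment -- hence IPOIB_UD_RX_SG mappings per buffer.
+ */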
+static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
+{
+ return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
+}
+
/*
* We stash a pointer to our private neighbour information after our
* hardware address in neigh->ha. The ALIGN() expression here makes
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0205eb7c1bd..7cf1fa7074a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -89,28 +89,59 @@ void ipoib_free_ah(struct kref *kref)
spin_unlock_irqrestore(&priv->lock, flags);
}
+static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
+ u64 mapping[IPOIB_UD_RX_SG])
+{
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
+ DMA_FROM_DEVICE);
+ ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ } else
+ ib_dma_unmap_single(priv->ca, mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+ DMA_FROM_DEVICE);
+}
+
+static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
+ struct sk_buff *skb,
+ unsigned int length)
+{
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int size;
+ /*
+	 * Only two buffers are needed for max_payload = 4K;
+	 * the first buffer's size is IPOIB_UD_HEAD_SIZE.
+ */
+ skb->tail += IPOIB_UD_HEAD_SIZE;
+ skb->len += length;
+
+ size = length - IPOIB_UD_HEAD_SIZE;
+
+ frag->size = size;
+ skb->data_len += size;
+ skb->truesize += size;
+ } else
+ skb_put(skb, length);
+
+}
+
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ib_sge list;
- struct ib_recv_wr param;
struct ib_recv_wr *bad_wr;
int ret;
- list.addr = priv->rx_ring[id].mapping;
- list.length = IPOIB_BUF_SIZE;
- list.lkey = priv->mr->lkey;
+ priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
+ priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
+ priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
- param.next = NULL;
- param.wr_id = id | IPOIB_OP_RECV;
- param.sg_list = &list;
- param.num_sge = 1;
- ret = ib_post_recv(priv->qp, &param, &bad_wr);
+ ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
- ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
@@ -118,15 +149,21 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
return ret;
}
-static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
+static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
- u64 addr;
+ int buf_size;
+ u64 *mapping;
- skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
- if (!skb)
- return -ENOMEM;
+ if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ buf_size = IPOIB_UD_HEAD_SIZE;
+ else
+ buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+
+ skb = dev_alloc_skb(buf_size + 4);
+ if (unlikely(!skb))
+ return NULL;
/*
* IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
@@ -135,17 +172,32 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
*/
skb_reserve(skb, 4);
- addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
- dev_kfree_skb_any(skb);
- return -EIO;
+ mapping = priv->rx_ring[id].mapping;
+ mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
+ goto error;
+
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ struct page *page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ goto partial_error;
+ skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
+ mapping[1] =
+ ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
+ goto partial_error;
}
- priv->rx_ring[id].skb = skb;
- priv->rx_ring[id].mapping = addr;
+ priv->rx_ring[id].skb = skb;
+ return skb;
- return 0;
+partial_error:
+ ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
+error:
+ dev_kfree_skb_any(skb);
+ return NULL;
}
static int ipoib_ib_post_receives(struct net_device *dev)
@@ -154,7 +206,7 @@ static int ipoib_ib_post_receives(struct net_device *dev)
int i;
for (i = 0; i < ipoib_recvq_size; ++i) {
- if (ipoib_alloc_rx_skb(dev, i)) {
+ if (!ipoib_alloc_rx_skb(dev, i)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
return -ENOMEM;
}
@@ -172,7 +224,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
- u64 addr;
+ u64 mapping[IPOIB_UD_RX_SG];
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -184,15 +236,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
skb = priv->rx_ring[wr_id].skb;
- addr = priv->rx_ring[wr_id].mapping;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
- ib_dma_unmap_single(priv->ca, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL;
return;
@@ -205,11 +255,14 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
goto repost;
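+	/*
+	 * Save the DMA addresses first: ipoib_alloc_rx_skb() below
+	 * overwrites rx_ring[wr_id].mapping with the new buffer's mapping.
+	 */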
+ memcpy(mapping, priv->rx_ring[wr_id].mapping,
+ IPOIB_UD_RX_SG * sizeof *mapping);
+
/*
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
*/
- if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+ if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
++dev->stats.rx_dropped;
goto repost;
}
@@ -217,9 +270,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
- ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ipoib_ud_dma_unmap_rx(priv, mapping);
+ ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
- skb_put(skb, wc->byte_len);
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -733,10 +786,8 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
rx_req = &priv->rx_ring[i];
if (!rx_req->skb)
continue;
- ib_dma_unmap_single(priv->ca,
- rx_req->mapping,
- IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
+ ipoib_ud_dma_unmap_rx(priv,
+ priv->rx_ring[i].mapping);
dev_kfree_skb_any(rx_req->skb);
rx_req->skb = NULL;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bd07f02cf02..7a4ed9d3d84 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -195,7 +195,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
- if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
+ if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
return -EINVAL;
priv->admin_mtu = new_mtu;
@@ -971,10 +971,6 @@ static void ipoib_setup(struct net_device *dev)
NETIF_F_LLTX |
NETIF_F_HIGHDMA);
- /* MTU will be reset when mcast join happens */
- dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
- priv->mcast_mtu = priv->admin_mtu = dev->mtu;
-
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
netif_carrier_off(dev);
@@ -1107,6 +1103,7 @@ static struct net_device *ipoib_add_port(const char *format,
{
struct ipoib_dev_priv *priv;
struct ib_device_attr *device_attr;
+ struct ib_port_attr attr;
int result = -ENOMEM;
priv = ipoib_intf_alloc(format);
@@ -1115,6 +1112,18 @@ static struct net_device *ipoib_add_port(const char *format,
SET_NETDEV_DEV(priv->dev, hca->dma_device);
+ if (!ib_query_port(hca, port, &attr))
+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+ else {
+ printk(KERN_WARNING "%s: ib_query_port %d failed\n",
+ hca->name, port);
+ goto device_init_failed;
+ }
+
+ /* MTU will be reset when mcast join happens */
+ priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+ priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 31a53c5bcb1..d00a2c174ae 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
return;
}
- priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
- IPOIB_ENCAP_LEN;
+ priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
if (!ipoib_cm_admin_enabled(dev))
dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 8a20e3742c4..07c03f178a4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
.max_send_sge = 1,
- .max_recv_sge = 1
+ .max_recv_sge = IPOIB_UD_RX_SG
},
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_UD
@@ -215,6 +215,19 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->tx_wr.sg_list = priv->tx_sge;
priv->tx_wr.send_flags = IB_SEND_SIGNALED;
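+	/* RX scatter/gather: SGE 0 maps the skb linear area; when a 4K-MTU
+	 * buffer spans a page, SGE 1 maps the extra page fragment. */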
+ priv->rx_sge[0].lkey = priv->mr->lkey;
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+ priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
+ priv->rx_sge[1].length = PAGE_SIZE;
+ priv->rx_sge[1].lkey = priv->mr->lkey;
+ priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
+ } else {
+ priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ priv->rx_wr.num_sge = 1;
+ }
+ priv->rx_wr.next = NULL;
+ priv->rx_wr.sg_list = priv->rx_sge;
+
return 0;
out_free_cq:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 293f5b892e3..431fdeaa2dc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -89,6 +89,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
goto err;
}
+ priv->max_ib_mtu = ppriv->max_ib_mtu;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
priv->pkey = pkey;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 4b07bdadb81..b29e3affb80 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -444,6 +444,23 @@ exit:
__FUNCTION__, retval);
}
+static void xpad_bulk_out(struct urb *urb)
+{
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ break;
+ default:
+ dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
+ }
+}
+
#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
static void xpad_irq_out(struct urb *urb)
{
@@ -475,23 +492,6 @@ exit:
__FUNCTION__, retval);
}
-static void xpad_bulk_out(struct urb *urb)
-{
- switch (urb->status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
- break;
- default:
- dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
- }
-}
-
static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
{
struct usb_endpoint_descriptor *ep_irq_out;
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index f972ff377b6..cc9f27514ae 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -114,8 +114,8 @@ static int emumousebtn_input_register(void)
if (!emumousebtn)
return -ENOMEM;
- lockdep_set_class(emumousebtn->event_lock, &emumousebtn_event_class);
- lockdep_set_class(emumousebtn->mutex, &emumousebtn_mutex_class);
+ lockdep_set_class(&emumousebtn->event_lock, &emumousebtn_event_class);
+ lockdep_set_class(&emumousebtn->mutex, &emumousebtn_mutex_class);
emumousebtn->name = "Macintosh mouse button emulation";
emumousebtn->id.bustype = BUS_ADB;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 6477fc66cc2..346223856f5 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -299,7 +299,7 @@ static int stk7700d_tuner_attach(struct dvb_usb_adapter *adap)
}
/* STK7700-PH: Digital/Analog Hybrid Tuner, e.g. Cinergy HT USB HE */
-struct dibx000_agc_config xc3028_agc_config = {
+static struct dibx000_agc_config xc3028_agc_config = {
BAND_VHF | BAND_UHF, /* band_caps */
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0,
@@ -342,7 +342,7 @@ struct dibx000_agc_config xc3028_agc_config = {
};
/* PLL Configuration for COFDM BW_MHz = 8.00 with external clock = 30.00 */
-struct dibx000_bandwidth_config xc3028_bw_config = {
+static struct dibx000_bandwidth_config xc3028_bw_config = {
60000, 30000, /* internal, sampling */
1, 8, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass */
0, 0, 1, 1, 0, /* misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc,
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 68fab616f55..f5fceb3cdb3 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -307,6 +307,14 @@ config DVB_AU8522
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
+config DVB_S5H1411
+ tristate "Samsung S5H1411 based"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
+ to support this frontend.
+
comment "Tuners/PLL support"
depends on DVB_CORE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 2f873fc0f64..9747c73dc82 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -55,3 +55,4 @@ obj-$(CONFIG_DVB_TUNER_XC5000) += xc5000.o
obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
obj-$(CONFIG_DVB_AU8522) += au8522.o
obj-$(CONFIG_DVB_TDA10048) += tda10048.o
+obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
diff --git a/drivers/media/dvb/frontends/mt312.h b/drivers/media/dvb/frontends/mt312.h
index 96338f0c4dd..de796eab391 100644
--- a/drivers/media/dvb/frontends/mt312.h
+++ b/drivers/media/dvb/frontends/mt312.h
@@ -33,7 +33,7 @@ struct mt312_config {
u8 demod_address;
/* inverted voltage setting */
- int voltage_inverted:1;
+ unsigned int voltage_inverted:1;
};
#if defined(CONFIG_DVB_MT312) || (defined(CONFIG_DVB_MT312_MODULE) && defined(MODULE))
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
new file mode 100644
index 00000000000..eb5bfc99d4e
--- /dev/null
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -0,0 +1,888 @@
+/*
+ Samsung S5H1411 VSB/QAM demodulator driver
+
+ Copyright (C) 2008 Steven Toth <stoth@hauppauge.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "dvb_frontend.h"
+#include "dvb-pll.h"
+#include "s5h1411.h"
+
+struct s5h1411_state {
+
+ struct i2c_adapter *i2c;
+
+ /* configuration settings */
+ const struct s5h1411_config *config;
+
+ struct dvb_frontend frontend;
+
+ fe_modulation_t current_modulation;
+
+ u32 current_frequency;
+ int if_freq;
+
+ u8 inversion;
+};
+
+static int debug;
+
+#define dprintk(arg...) do { \
+ if (debug) \
+ printk(arg); \
+ } while (0)
+
+/* Register values to initialise the demod; the defaults select VSB */
+static struct init_tab {
+ u8 addr;
+ u8 reg;
+ u16 data;
+} init_tab[] = {
+ { S5H1411_I2C_TOP_ADDR, 0x00, 0x0071, },
+ { S5H1411_I2C_TOP_ADDR, 0x08, 0x0047, },
+ { S5H1411_I2C_TOP_ADDR, 0x1c, 0x0400, },
+ { S5H1411_I2C_TOP_ADDR, 0x1e, 0x0370, },
+ { S5H1411_I2C_TOP_ADDR, 0x1f, 0x342a, },
+ { S5H1411_I2C_TOP_ADDR, 0x24, 0x0231, },
+ { S5H1411_I2C_TOP_ADDR, 0x25, 0x1011, },
+ { S5H1411_I2C_TOP_ADDR, 0x26, 0x0f07, },
+ { S5H1411_I2C_TOP_ADDR, 0x27, 0x0f04, },
+ { S5H1411_I2C_TOP_ADDR, 0x28, 0x070f, },
+ { S5H1411_I2C_TOP_ADDR, 0x29, 0x2820, },
+ { S5H1411_I2C_TOP_ADDR, 0x2a, 0x102e, },
+ { S5H1411_I2C_TOP_ADDR, 0x2b, 0x0220, },
+ { S5H1411_I2C_TOP_ADDR, 0x2e, 0x0d0e, },
+ { S5H1411_I2C_TOP_ADDR, 0x2f, 0x1013, },
+ { S5H1411_I2C_TOP_ADDR, 0x31, 0x171b, },
+ { S5H1411_I2C_TOP_ADDR, 0x32, 0x0e0f, },
+ { S5H1411_I2C_TOP_ADDR, 0x33, 0x0f10, },
+ { S5H1411_I2C_TOP_ADDR, 0x34, 0x170e, },
+ { S5H1411_I2C_TOP_ADDR, 0x35, 0x4b10, },
+ { S5H1411_I2C_TOP_ADDR, 0x36, 0x0f17, },
+ { S5H1411_I2C_TOP_ADDR, 0x3c, 0x1577, },
+ { S5H1411_I2C_TOP_ADDR, 0x3d, 0x081a, },
+ { S5H1411_I2C_TOP_ADDR, 0x3e, 0x77ee, },
+ { S5H1411_I2C_TOP_ADDR, 0x40, 0x1e09, },
+ { S5H1411_I2C_TOP_ADDR, 0x41, 0x0f0c, },
+ { S5H1411_I2C_TOP_ADDR, 0x42, 0x1f10, },
+ { S5H1411_I2C_TOP_ADDR, 0x4d, 0x0509, },
+ { S5H1411_I2C_TOP_ADDR, 0x4e, 0x0a00, },
+ { S5H1411_I2C_TOP_ADDR, 0x50, 0x0000, },
+ { S5H1411_I2C_TOP_ADDR, 0x5b, 0x0000, },
+ { S5H1411_I2C_TOP_ADDR, 0x5c, 0x0008, },
+ { S5H1411_I2C_TOP_ADDR, 0x57, 0x1101, },
+ { S5H1411_I2C_TOP_ADDR, 0x65, 0x007c, },
+ { S5H1411_I2C_TOP_ADDR, 0x68, 0x0512, },
+ { S5H1411_I2C_TOP_ADDR, 0x69, 0x0258, },
+ { S5H1411_I2C_TOP_ADDR, 0x70, 0x0004, },
+ { S5H1411_I2C_TOP_ADDR, 0x71, 0x0007, },
+ { S5H1411_I2C_TOP_ADDR, 0x76, 0x00a9, },
+ { S5H1411_I2C_TOP_ADDR, 0x78, 0x3141, },
+ { S5H1411_I2C_TOP_ADDR, 0x7a, 0x3141, },
+ { S5H1411_I2C_TOP_ADDR, 0xb3, 0x8003, },
+ { S5H1411_I2C_TOP_ADDR, 0xb5, 0xafbb, },
+ { S5H1411_I2C_TOP_ADDR, 0xb5, 0xa6bb, },
+ { S5H1411_I2C_TOP_ADDR, 0xb6, 0x0609, },
+ { S5H1411_I2C_TOP_ADDR, 0xb7, 0x2f06, },
+ { S5H1411_I2C_TOP_ADDR, 0xb8, 0x003f, },
+ { S5H1411_I2C_TOP_ADDR, 0xb9, 0x2700, },
+ { S5H1411_I2C_TOP_ADDR, 0xba, 0xfac8, },
+ { S5H1411_I2C_TOP_ADDR, 0xbe, 0x1003, },
+ { S5H1411_I2C_TOP_ADDR, 0xbf, 0x103f, },
+ { S5H1411_I2C_TOP_ADDR, 0xce, 0x2000, },
+ { S5H1411_I2C_TOP_ADDR, 0xcf, 0x0800, },
+ { S5H1411_I2C_TOP_ADDR, 0xd0, 0x0800, },
+ { S5H1411_I2C_TOP_ADDR, 0xd1, 0x0400, },
+ { S5H1411_I2C_TOP_ADDR, 0xd2, 0x0800, },
+ { S5H1411_I2C_TOP_ADDR, 0xd3, 0x2000, },
+ { S5H1411_I2C_TOP_ADDR, 0xd4, 0x3000, },
+ { S5H1411_I2C_TOP_ADDR, 0xdb, 0x4a9b, },
+ { S5H1411_I2C_TOP_ADDR, 0xdc, 0x1000, },
+ { S5H1411_I2C_TOP_ADDR, 0xde, 0x0001, },
+ { S5H1411_I2C_TOP_ADDR, 0xdf, 0x0000, },
+ { S5H1411_I2C_TOP_ADDR, 0xe3, 0x0301, },
+ { S5H1411_I2C_QAM_ADDR, 0xf3, 0x0000, },
+ { S5H1411_I2C_QAM_ADDR, 0xf3, 0x0001, },
+ { S5H1411_I2C_QAM_ADDR, 0x08, 0x0600, },
+ { S5H1411_I2C_QAM_ADDR, 0x18, 0x4201, },
+ { S5H1411_I2C_QAM_ADDR, 0x1e, 0x6476, },
+ { S5H1411_I2C_QAM_ADDR, 0x21, 0x0830, },
+ { S5H1411_I2C_QAM_ADDR, 0x0c, 0x5679, },
+ { S5H1411_I2C_QAM_ADDR, 0x0d, 0x579b, },
+ { S5H1411_I2C_QAM_ADDR, 0x24, 0x0102, },
+ { S5H1411_I2C_QAM_ADDR, 0x31, 0x7488, },
+ { S5H1411_I2C_QAM_ADDR, 0x32, 0x0a08, },
+ { S5H1411_I2C_QAM_ADDR, 0x3d, 0x8689, },
+ { S5H1411_I2C_QAM_ADDR, 0x49, 0x0048, },
+ { S5H1411_I2C_QAM_ADDR, 0x57, 0x2012, },
+ { S5H1411_I2C_QAM_ADDR, 0x5d, 0x7676, },
+ { S5H1411_I2C_QAM_ADDR, 0x04, 0x0400, },
+ { S5H1411_I2C_QAM_ADDR, 0x58, 0x00c0, },
+ { S5H1411_I2C_QAM_ADDR, 0x5b, 0x0100, },
+};
+
+/* VSB SNR lookup table */
+static struct vsb_snr_tab {
+ u16 val;
+ u16 data;
+} vsb_snr_tab[] = {
+ { 0x39f, 300, },
+ { 0x39b, 295, },
+ { 0x397, 290, },
+ { 0x394, 285, },
+ { 0x38f, 280, },
+ { 0x38b, 275, },
+ { 0x387, 270, },
+ { 0x382, 265, },
+ { 0x37d, 260, },
+ { 0x377, 255, },
+ { 0x370, 250, },
+ { 0x36a, 245, },
+ { 0x364, 240, },
+ { 0x35b, 235, },
+ { 0x353, 230, },
+ { 0x349, 225, },
+	{ 0x340, 220, },
+ { 0x337, 215, },
+ { 0x327, 210, },
+ { 0x31b, 205, },
+ { 0x310, 200, },
+ { 0x302, 195, },
+ { 0x2f3, 190, },
+ { 0x2e4, 185, },
+ { 0x2d7, 180, },
+ { 0x2cd, 175, },
+ { 0x2bb, 170, },
+ { 0x2a9, 165, },
+ { 0x29e, 160, },
+ { 0x284, 155, },
+ { 0x27a, 150, },
+ { 0x260, 145, },
+ { 0x23a, 140, },
+ { 0x224, 135, },
+ { 0x213, 130, },
+ { 0x204, 125, },
+ { 0x1fe, 120, },
+ { 0, 0, },
+};
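+/*
+ * The data column in these tables appears to be SNR in tenths of a dB
+ * (e.g. 300 == 30.0 dB); s5h1411_read_snr() returns it unscaled.
+ */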
+
+/* QAM64 SNR lookup table */
+static struct qam64_snr_tab {
+ u16 val;
+ u16 data;
+} qam64_snr_tab[] = {
+ { 0x0001, 0, },
+ { 0x0af0, 300, },
+ { 0x0d80, 290, },
+ { 0x10a0, 280, },
+ { 0x14b5, 270, },
+ { 0x1590, 268, },
+ { 0x1680, 266, },
+ { 0x17b0, 264, },
+ { 0x18c0, 262, },
+ { 0x19b0, 260, },
+ { 0x1ad0, 258, },
+ { 0x1d00, 256, },
+ { 0x1da0, 254, },
+ { 0x1ef0, 252, },
+ { 0x2050, 250, },
+ { 0x20f0, 249, },
+ { 0x21d0, 248, },
+ { 0x22b0, 247, },
+ { 0x23a0, 246, },
+ { 0x2470, 245, },
+ { 0x24f0, 244, },
+ { 0x25a0, 243, },
+ { 0x26c0, 242, },
+ { 0x27b0, 241, },
+ { 0x28d0, 240, },
+ { 0x29b0, 239, },
+ { 0x2ad0, 238, },
+ { 0x2ba0, 237, },
+ { 0x2c80, 236, },
+ { 0x2d20, 235, },
+ { 0x2e00, 234, },
+ { 0x2f10, 233, },
+ { 0x3050, 232, },
+ { 0x3190, 231, },
+ { 0x3300, 230, },
+ { 0x3340, 229, },
+ { 0x3200, 228, },
+ { 0x3550, 227, },
+ { 0x3610, 226, },
+ { 0x3600, 225, },
+ { 0x3700, 224, },
+ { 0x3800, 223, },
+ { 0x3920, 222, },
+ { 0x3a20, 221, },
+ { 0x3b30, 220, },
+ { 0x3d00, 219, },
+ { 0x3e00, 218, },
+ { 0x4000, 217, },
+ { 0x4100, 216, },
+ { 0x4300, 215, },
+ { 0x4400, 214, },
+ { 0x4600, 213, },
+ { 0x4700, 212, },
+ { 0x4800, 211, },
+ { 0x4a00, 210, },
+ { 0x4b00, 209, },
+ { 0x4d00, 208, },
+ { 0x4f00, 207, },
+ { 0x5050, 206, },
+ { 0x5200, 205, },
+ { 0x53c0, 204, },
+ { 0x5450, 203, },
+ { 0x5650, 202, },
+ { 0x5820, 201, },
+ { 0x6000, 200, },
+ { 0xffff, 0, },
+};
+
+/* QAM256 SNR lookup table */
+static struct qam256_snr_tab {
+ u16 val;
+ u16 data;
+} qam256_snr_tab[] = {
+ { 0x0001, 0, },
+ { 0x0970, 400, },
+ { 0x0a90, 390, },
+ { 0x0b90, 380, },
+ { 0x0d90, 370, },
+ { 0x0ff0, 360, },
+ { 0x1240, 350, },
+ { 0x1345, 348, },
+ { 0x13c0, 346, },
+ { 0x14c0, 344, },
+ { 0x1500, 342, },
+ { 0x1610, 340, },
+ { 0x1700, 338, },
+ { 0x1800, 336, },
+ { 0x18b0, 334, },
+ { 0x1900, 332, },
+ { 0x1ab0, 330, },
+ { 0x1bc0, 328, },
+ { 0x1cb0, 326, },
+ { 0x1db0, 324, },
+ { 0x1eb0, 322, },
+ { 0x2030, 320, },
+ { 0x2200, 318, },
+ { 0x2280, 316, },
+ { 0x2410, 314, },
+ { 0x25b0, 312, },
+ { 0x27a0, 310, },
+ { 0x2840, 308, },
+ { 0x29d0, 306, },
+ { 0x2b10, 304, },
+ { 0x2d30, 302, },
+ { 0x2f20, 300, },
+ { 0x30c0, 298, },
+ { 0x3260, 297, },
+ { 0x32c0, 296, },
+ { 0x3300, 295, },
+ { 0x33b0, 294, },
+ { 0x34b0, 293, },
+ { 0x35a0, 292, },
+ { 0x3650, 291, },
+ { 0x3800, 290, },
+ { 0x3900, 289, },
+ { 0x3a50, 288, },
+ { 0x3b30, 287, },
+ { 0x3cb0, 286, },
+ { 0x3e20, 285, },
+ { 0x3fa0, 284, },
+ { 0x40a0, 283, },
+ { 0x41c0, 282, },
+ { 0x42f0, 281, },
+ { 0x44a0, 280, },
+ { 0x4600, 279, },
+ { 0x47b0, 278, },
+ { 0x4900, 277, },
+ { 0x4a00, 276, },
+ { 0x4ba0, 275, },
+ { 0x4d00, 274, },
+ { 0x4f00, 273, },
+ { 0x5000, 272, },
+	{ 0x51f0, 271, },
+ { 0x53a0, 270, },
+ { 0x5520, 269, },
+ { 0x5700, 268, },
+ { 0x5800, 267, },
+ { 0x5a00, 266, },
+ { 0x5c00, 265, },
+ { 0x5d00, 264, },
+ { 0x5f00, 263, },
+ { 0x6000, 262, },
+ { 0x6200, 261, },
+ { 0x6400, 260, },
+ { 0xffff, 0, },
+};
+
+/* 8 bit registers, 16 bit values */
+static int s5h1411_writereg(struct s5h1411_state *state,
+ u8 addr, u8 reg, u16 data)
+{
+ int ret;
+ u8 buf [] = { reg, data >> 8, data & 0xff };
+
+ struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = 3 };
+
+ ret = i2c_transfer(state->i2c, &msg, 1);
+
+ if (ret != 1)
+ printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, "
+ "ret == %i)\n", __func__, addr, reg, data, ret);
+
+ return (ret != 1) ? -1 : 0;
+}
+
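+/*
+ * A read mirrors the write: send the 8-bit register index, then read
+ * two bytes back, combined into a big-endian 16-bit value.
+ */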
+static u16 s5h1411_readreg(struct s5h1411_state *state, u8 addr, u8 reg)
+{
+ int ret;
+ u8 b0 [] = { reg };
+ u8 b1 [] = { 0, 0 };
+
+ struct i2c_msg msg [] = {
+ { .addr = addr, .flags = 0, .buf = b0, .len = 1 },
+ { .addr = addr, .flags = I2C_M_RD, .buf = b1, .len = 2 } };
+
+ ret = i2c_transfer(state->i2c, msg, 2);
+
+ if (ret != 2)
+ printk(KERN_ERR "%s: readreg error (ret == %i)\n",
+ __func__, ret);
+ return (b1[0] << 8) | b1[1];
+}
+
+static int s5h1411_softreset(struct dvb_frontend *fe)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+
+ dprintk("%s()\n", __func__);
+
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf7, 0);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf7, 1);
+ return 0;
+}
+
+static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+
+ dprintk("%s(%d KHz)\n", __func__, KHz);
+
+ switch (KHz) {
+ case 3250:
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d9);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x5342);
+ s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x10d9);
+ break;
+ case 3500:
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1225);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x1e96);
+ s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x1225);
+ break;
+ case 4000:
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x14bc);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0xb53e);
+ s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x14bd);
+ break;
+ default:
+ dprintk("%s(%d KHz) Invalid, defaulting to 5380\n",
+ __func__, KHz);
+ /* no break, need to continue */
+ case 5380:
+ case 44000:
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1be4);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x3655);
+ s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x1be4);
+ break;
+ }
+
+ state->if_freq = KHz;
+
+ return 0;
+}
+
+static int s5h1411_set_mpeg_timing(struct dvb_frontend *fe, int mode)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+ u16 val;
+
+ dprintk("%s(%d)\n", __func__, mode);
+
+ val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xbe) & 0xcfff;
+ switch (mode) {
+ case S5H1411_MPEGTIMING_CONTINOUS_INVERTING_CLOCK:
+ val |= 0x0000;
+ break;
+ case S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK:
+ dprintk("%s(%d) Mode1 or Defaulting\n", __func__, mode);
+ val |= 0x1000;
+ break;
+ case S5H1411_MPEGTIMING_NONCONTINOUS_INVERTING_CLOCK:
+ val |= 0x2000;
+ break;
+ case S5H1411_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK:
+ val |= 0x3000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	/* Configure MPEG Signal Timing characteristics */
+ return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbe, val);
+}
+
+static int s5h1411_set_spectralinversion(struct dvb_frontend *fe, int inversion)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+ u16 val;
+
+ dprintk("%s(%d)\n", __func__, inversion);
+ val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x24) & ~0x1000;
+
+ if (inversion == 1)
+ val |= 0x1000; /* Inverted */
+ else
+ val |= 0x0000;
+
+ state->inversion = inversion;
+ return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x24, val);
+}
+
+static int s5h1411_enable_modulation(struct dvb_frontend *fe,
+ fe_modulation_t m)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+
+ dprintk("%s(0x%08x)\n", __func__, m);
+
+ switch (m) {
+ case VSB_8:
+ dprintk("%s() VSB_8\n", __func__);
+ s5h1411_set_if_freq(fe, state->config->vsb_if);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x00, 0x71);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf6, 0x00);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xcd, 0xf1);
+ break;
+ case QAM_64:
+ case QAM_256:
+ dprintk("%s() QAM_AUTO (64/256)\n", __func__);
+ s5h1411_set_if_freq(fe, state->config->qam_if);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x00, 0x0171);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf6, 0x0001);
+ s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x16, 0x1101);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xcd, 0x00f0);
+ break;
+ default:
+ dprintk("%s() Invalid modulation\n", __func__);
+ return -EINVAL;
+ }
+
+ state->current_modulation = m;
+ s5h1411_softreset(fe);
+
+ return 0;
+}
+
+static int s5h1411_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+
+ dprintk("%s(%d)\n", __func__, enable);
+
+ if (enable)
+ return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 1);
+ else
+ return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 0);
+}
+
+static int s5h1411_set_gpio(struct dvb_frontend *fe, int enable)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+ u16 val;
+
+ dprintk("%s(%d)\n", __func__, enable);
+
+ val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xe0) & ~0x02;
+
+ if (enable)
+ return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0,
+ val | 0x02);
+ else
+ return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0, val);
+}
+
+static int s5h1411_sleep(struct dvb_frontend *fe, int enable)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+
+ dprintk("%s(%d)\n", __func__, enable);
+
+ if (enable)
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf4, 1);
+ else {
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf4, 0);
+ s5h1411_softreset(fe);
+ }
+
+ return 0;
+}
+
+static int s5h1411_register_reset(struct dvb_frontend *fe)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+
+ dprintk("%s()\n", __func__);
+
+ return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf3, 0);
+}
+
+/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
+static int s5h1411_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+
+ dprintk("%s(frequency=%d)\n", __func__, p->frequency);
+
+ s5h1411_softreset(fe);
+
+ state->current_frequency = p->frequency;
+
+ s5h1411_enable_modulation(fe, p->u.vsb.modulation);
+
+ /* Allow the demod to settle */
+ msleep(100);
+
+ if (fe->ops.tuner_ops.set_params) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ fe->ops.tuner_ops.set_params(fe, p);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+
+ return 0;
+}
+
+/* Reset the demod hardware and reset all of the configuration registers
+ to a default state. */
+static int s5h1411_init(struct dvb_frontend *fe)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+ int i;
+
+ dprintk("%s()\n", __func__);
+
+ s5h1411_sleep(fe, 0);
+ s5h1411_register_reset(fe);
+
+ for (i = 0; i < ARRAY_SIZE(init_tab); i++)
+ s5h1411_writereg(state, init_tab[i].addr,
+ init_tab[i].reg,
+ init_tab[i].data);
+
+ /* The datasheet says that after initialisation, VSB is default */
+ state->current_modulation = VSB_8;
+
+ if (state->config->output_mode == S5H1411_SERIAL_OUTPUT)
+ /* Serial */
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1101);
+ else
+ /* Parallel */
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1001);
+
+ s5h1411_set_spectralinversion(fe, state->config->inversion);
+ s5h1411_set_if_freq(fe, state->config->vsb_if);
+ s5h1411_set_gpio(fe, state->config->gpio);
+ s5h1411_set_mpeg_timing(fe, state->config->mpeg_timing);
+ s5h1411_softreset(fe);
+
+ /* Note: Leaving the I2C gate closed. */
+ s5h1411_i2c_gate_ctrl(fe, 0);
+
+ return 0;
+}
+
+static int s5h1411_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+ u16 reg;
+ u32 tuner_status = 0;
+
+ *status = 0;
+
+ /* Get the demodulator status */
+ reg = (s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2) >> 15)
+ & 0x0001;
+ if (reg)
+ *status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_SIGNAL;
+
+ switch (state->current_modulation) {
+ case QAM_64:
+ case QAM_256:
+ reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf0);
+ if (reg & 0x100)
+ *status |= FE_HAS_VITERBI;
+ if (reg & 0x10)
+ *status |= FE_HAS_SYNC;
+ break;
+ case VSB_8:
+ reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x5e);
+ if (reg & 0x0001)
+ *status |= FE_HAS_SYNC;
+ reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2);
+ if (reg & 0x1000)
+ *status |= FE_HAS_VITERBI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (state->config->status_mode) {
+ case S5H1411_DEMODLOCKING:
+ if (*status & FE_HAS_VITERBI)
+ *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
+ break;
+ case S5H1411_TUNERLOCKING:
+ /* Get the tuner status */
+ if (fe->ops.tuner_ops.get_status) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ fe->ops.tuner_ops.get_status(fe, &tuner_status);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+ if (tuner_status)
+ *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
+ break;
+ }
+
+ dprintk("%s() status 0x%08x\n", __func__, *status);
+
+ return 0;
+}
+
+static int s5h1411_qam256_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
+{
+ int i, ret = -EINVAL;
+ dprintk("%s()\n", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(qam256_snr_tab); i++) {
+ if (v < qam256_snr_tab[i].val) {
+ *snr = qam256_snr_tab[i].data;
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int s5h1411_qam64_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
+{
+ int i, ret = -EINVAL;
+ dprintk("%s()\n", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(qam64_snr_tab); i++) {
+ if (v < qam64_snr_tab[i].val) {
+ *snr = qam64_snr_tab[i].data;
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int s5h1411_vsb_lookup_snr(struct dvb_frontend *fe, u16 *snr, u16 v)
+{
+ int i, ret = -EINVAL;
+ dprintk("%s()\n", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(vsb_snr_tab); i++) {
+ if (v > vsb_snr_tab[i].val) {
+ *snr = vsb_snr_tab[i].data;
+ ret = 0;
+ break;
+ }
+ }
+ dprintk("%s() snr=%d\n", __func__, *snr);
+ return ret;
+}
+
+static int s5h1411_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+ u16 reg;
+ dprintk("%s()\n", __func__);
+
+ switch (state->current_modulation) {
+ case QAM_64:
+ reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf1);
+ return s5h1411_qam64_lookup_snr(fe, snr, reg);
+ case QAM_256:
+ reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf1);
+ return s5h1411_qam256_lookup_snr(fe, snr, reg);
+ case VSB_8:
+ reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR,
+ 0xf2) & 0x3ff;
+ return s5h1411_vsb_lookup_snr(fe, snr, reg);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int s5h1411_read_signal_strength(struct dvb_frontend *fe,
+ u16 *signal_strength)
+{
+ return s5h1411_read_snr(fe, signal_strength);
+}
+
+static int s5h1411_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+
+ *ucblocks = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xc9);
+
+ return 0;
+}
+
+static int s5h1411_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ return s5h1411_read_ucblocks(fe, ber);
+}
+
+static int s5h1411_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+
+ p->frequency = state->current_frequency;
+ p->u.vsb.modulation = state->current_modulation;
+
+ return 0;
+}
+
+static int s5h1411_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *tune)
+{
+ tune->min_delay_ms = 1000;
+ return 0;
+}
+
+static void s5h1411_release(struct dvb_frontend *fe)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+ kfree(state);
+}
+
+static struct dvb_frontend_ops s5h1411_ops;
+
+struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct s5h1411_state *state = NULL;
+ u16 reg;
+
+ /* allocate memory for the internal state */
+ state = kmalloc(sizeof(struct s5h1411_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ /* setup the state */
+ state->config = config;
+ state->i2c = i2c;
+ state->current_modulation = VSB_8;
+ state->inversion = state->config->inversion;
+
+ /* check if the demod exists */
+ reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x05);
+ if (reg != 0x0066)
+ goto error;
+
+ /* create dvb_frontend */
+ memcpy(&state->frontend.ops, &s5h1411_ops,
+ sizeof(struct dvb_frontend_ops));
+
+ state->frontend.demodulator_priv = state;
+
+ if (s5h1411_init(&state->frontend) != 0) {
+ printk(KERN_ERR "%s: Failed to initialize correctly\n",
+ __func__);
+ goto error;
+ }
+
+ /* Note: Leaving the I2C gate open here. */
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xf5, 1);
+
+ return &state->frontend;
+
+error:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(s5h1411_attach);
+
+static struct dvb_frontend_ops s5h1411_ops = {
+
+ .info = {
+ .name = "Samsung S5H1411 QAM/8VSB Frontend",
+ .type = FE_ATSC,
+ .frequency_min = 54000000,
+ .frequency_max = 858000000,
+ .frequency_stepsize = 62500,
+ .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
+ },
+
+ .init = s5h1411_init,
+ .i2c_gate_ctrl = s5h1411_i2c_gate_ctrl,
+ .set_frontend = s5h1411_set_frontend,
+ .get_frontend = s5h1411_get_frontend,
+ .get_tune_settings = s5h1411_get_tune_settings,
+ .read_status = s5h1411_read_status,
+ .read_ber = s5h1411_read_ber,
+ .read_signal_strength = s5h1411_read_signal_strength,
+ .read_snr = s5h1411_read_snr,
+ .read_ucblocks = s5h1411_read_ucblocks,
+ .release = s5h1411_release,
+};
+
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable verbose debug messages");
+
+MODULE_DESCRIPTION("Samsung S5H1411 QAM-B/ATSC Demodulator driver");
+MODULE_AUTHOR("Steven Toth");
+MODULE_LICENSE("GPL");
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ */
diff --git a/drivers/media/dvb/frontends/s5h1411.h b/drivers/media/dvb/frontends/s5h1411.h
new file mode 100644
index 00000000000..1855f64ed4d
--- /dev/null
+++ b/drivers/media/dvb/frontends/s5h1411.h
@@ -0,0 +1,90 @@
+/*
+ Samsung S5H1411 VSB/QAM demodulator driver
+
+ Copyright (C) 2008 Steven Toth <stoth@hauppauge.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef __S5H1411_H__
+#define __S5H1411_H__
+
+#include <linux/dvb/frontend.h>
+
+#define S5H1411_I2C_TOP_ADDR (0x32 >> 1)
+#define S5H1411_I2C_QAM_ADDR (0x34 >> 1)
+
+struct s5h1411_config {
+
+ /* serial/parallel output */
+#define S5H1411_PARALLEL_OUTPUT 0
+#define S5H1411_SERIAL_OUTPUT 1
+ u8 output_mode;
+
+ /* GPIO Setting */
+#define S5H1411_GPIO_OFF 0
+#define S5H1411_GPIO_ON 1
+ u8 gpio;
+
+ /* MPEG signal timing */
+#define S5H1411_MPEGTIMING_CONTINOUS_INVERTING_CLOCK 0
+#define S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK 1
+#define S5H1411_MPEGTIMING_NONCONTINOUS_INVERTING_CLOCK 2
+#define S5H1411_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK 3
+ u16 mpeg_timing;
+
+ /* IF Freq for QAM and VSB in KHz */
+#define S5H1411_IF_2500 2500
+#define S5H1411_IF_3500 3500
+#define S5H1411_IF_4000 4000
+#define S5H1411_IF_5380 5380
+#define S5H1411_IF_44000 44000
+#define S5H1411_VSB_IF_DEFAULT S5H1411_IF_44000
+#define S5H1411_QAM_IF_DEFAULT S5H1411_IF_44000
+ u16 qam_if;
+ u16 vsb_if;
+
+ /* Spectral Inversion */
+#define S5H1411_INVERSION_OFF 0
+#define S5H1411_INVERSION_ON 1
+ u8 inversion;
+
+ /* Return lock status based on tuner lock, or demod lock */
+#define S5H1411_TUNERLOCKING 0
+#define S5H1411_DEMODLOCKING 1
+ u8 status_mode;
+};
+
+#if defined(CONFIG_DVB_S5H1411) || \
+ (defined(CONFIG_DVB_S5H1411_MODULE) && defined(MODULE))
+extern struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *s5h1411_attach(
+ const struct s5h1411_config *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_DVB_S5H1411 */
+
+#endif /* __S5H1411_H__ */
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ */
diff --git a/drivers/media/video/au0828/Kconfig b/drivers/media/video/au0828/Kconfig
index c97c4bd2484..41708267e7a 100644
--- a/drivers/media/video/au0828/Kconfig
+++ b/drivers/media/video/au0828/Kconfig
@@ -1,7 +1,7 @@
config VIDEO_AU0828
tristate "Auvitek AU0828 support"
- depends on VIDEO_DEV && I2C && INPUT
+ depends on VIDEO_DEV && I2C && INPUT && DVB_CORE
select I2C_ALGOBIT
select DVB_AU8522 if !DVB_FE_CUSTOMIZE
select DVB_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 8ca91f81427..a2a6983444f 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -36,7 +36,6 @@ struct au0828_board au0828_boards[] = {
.name = "DViCO FusionHDTV USB",
},
};
-const unsigned int au0828_bcount = ARRAY_SIZE(au0828_boards);
/* Tuner callback function for au0828 boards. Currently only needed
* for HVR1500Q, which has an xc5000 tuner.
diff --git a/drivers/media/video/au0828/au0828-core.c b/drivers/media/video/au0828/au0828-core.c
index e65d5642cb1..54bfc0f0529 100644
--- a/drivers/media/video/au0828/au0828-core.c
+++ b/drivers/media/video/au0828/au0828-core.c
@@ -32,18 +32,10 @@
* 4 = I2C related
* 8 = Bridge related
*/
-unsigned int debug;
-module_param(debug, int, 0644);
+int au0828_debug;
+module_param_named(debug, au0828_debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
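+/* e.g. "modprobe au0828 debug=5" enables generic (1) and i2c (4) messages */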
-unsigned int usb_debug;
-module_param(usb_debug, int, 0644);
-MODULE_PARM_DESC(usb_debug, "enable usb debug messages");
-
-unsigned int bridge_debug;
-module_param(bridge_debug, int, 0644);
-MODULE_PARM_DESC(bridge_debug, "enable bridge debug messages");
-
#define _AU0828_BULKPIPE 0x03
#define _BULKPIPESIZE 0xffff
@@ -229,24 +221,18 @@ static int __init au0828_init(void)
{
int ret;
- if (debug)
+ if (au0828_debug & 1)
printk(KERN_INFO "%s() Debugging is enabled\n", __func__);
- if (usb_debug) {
+ if (au0828_debug & 2)
printk(KERN_INFO "%s() USB Debugging is enabled\n", __func__);
- debug |= 2;
- }
- if (i2c_debug) {
+ if (au0828_debug & 4)
printk(KERN_INFO "%s() I2C Debugging is enabled\n", __func__);
- debug |= 4;
- }
- if (bridge_debug) {
+ if (au0828_debug & 8)
printk(KERN_INFO "%s() Bridge Debugging is enabled\n",
__func__);
- debug |= 8;
- }
printk(KERN_INFO "au0828 driver loaded\n");
diff --git a/drivers/media/video/au0828/au0828-dvb.c b/drivers/media/video/au0828/au0828-dvb.c
index 85d0ae9a322..5040d7fc4af 100644
--- a/drivers/media/video/au0828/au0828-dvb.c
+++ b/drivers/media/video/au0828/au0828-dvb.c
@@ -204,7 +204,7 @@ static int au0828_dvb_stop_feed(struct dvb_demux_feed *feed)
return ret;
}
-int dvb_register(struct au0828_dev *dev)
+static int dvb_register(struct au0828_dev *dev)
{
struct au0828_dvb *dvb = &dev->dvb;
int result;
diff --git a/drivers/media/video/au0828/au0828-i2c.c b/drivers/media/video/au0828/au0828-i2c.c
index 94c8b74a665..741a4937b05 100644
--- a/drivers/media/video/au0828/au0828-i2c.c
+++ b/drivers/media/video/au0828/au0828-i2c.c
@@ -29,11 +29,7 @@
#include <media/v4l2-common.h>
-unsigned int i2c_debug;
-module_param(i2c_debug, int, 0444);
-MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
-
-unsigned int i2c_scan;
+static int i2c_scan;
module_param(i2c_scan, int, 0444);
MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
index 0200b9fc5dc..7beb571798e 100644
--- a/drivers/media/video/au0828/au0828.h
+++ b/drivers/media/video/au0828/au0828.h
@@ -96,15 +96,12 @@ struct au0828_buff {
/* au0828-core.c */
extern u32 au0828_read(struct au0828_dev *dev, u16 reg);
extern u32 au0828_write(struct au0828_dev *dev, u16 reg, u32 val);
-extern unsigned int debug;
-extern unsigned int usb_debug;
-extern unsigned int bridge_debug;
+extern int au0828_debug;
/* ----------------------------------------------------------- */
/* au0828-cards.c */
extern struct au0828_board au0828_boards[];
extern struct usb_device_id au0828_usb_id_table[];
-extern const unsigned int au0828_bcount;
extern void au0828_gpio_setup(struct au0828_dev *dev);
extern int au0828_tuner_callback(void *priv, int command, int arg);
extern void au0828_card_setup(struct au0828_dev *dev);
@@ -115,7 +112,6 @@ extern int au0828_i2c_register(struct au0828_dev *dev);
extern int au0828_i2c_unregister(struct au0828_dev *dev);
extern void au0828_call_i2c_clients(struct au0828_dev *dev,
unsigned int cmd, void *arg);
-extern unsigned int i2c_debug;
/* ----------------------------------------------------------- */
/* au0828-dvb.c */
@@ -123,6 +119,6 @@ extern int au0828_dvb_register(struct au0828_dev *dev);
extern void au0828_dvb_unregister(struct au0828_dev *dev);
#define dprintk(level, fmt, arg...)\
- do { if (debug & level)\
+ do { if (au0828_debug & level)\
printk(KERN_DEBUG DRIVER_NAME "/0: " fmt, ## arg);\
} while (0)
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 870d6e197d6..f05649727b6 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -191,7 +191,7 @@ static struct tda18271_config hauppauge_hvr1200_tuner_config = {
.gate = TDA18271_GATE_ANALOG,
};
-struct dibx000_agc_config xc3028_agc_config = {
+static struct dibx000_agc_config xc3028_agc_config = {
BAND_VHF | BAND_UHF, /* band_caps */
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0,
@@ -237,7 +237,7 @@ struct dibx000_agc_config xc3028_agc_config = {
/* PLL Configuration for COFDM BW_MHz = 8.000000
* With external clock = 30.000000 */
-struct dibx000_bandwidth_config xc3028_bw_config = {
+static struct dibx000_bandwidth_config xc3028_bw_config = {
60000, /* internal */
30000, /* sampling */
1, /* pll_cfg: prediv */
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index bcf6d9ba063..27635cdcbaf 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -58,6 +58,7 @@ config VIDEO_CX88_DVB
select DVB_CX24123 if !DVB_FE_CUSTOMISE
select DVB_ISL6421 if !DVB_FE_CUSTOMISE
select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
+ select DVB_S5H1411 if !DVB_FE_CUSTOMISE
---help---
This adds support for DVB/ATSC cards based on the
Conexant 2388x chip.
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 61c4f72644b..6c0c94c5ef9 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -546,10 +546,12 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
if (retval < 0)
return retval;
- dev->mailbox = blackbird_find_mailbox(dev);
- if (dev->mailbox < 0)
+ retval = blackbird_find_mailbox(dev);
+ if (retval < 0)
return -1;
+ dev->mailbox = retval;
+
retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
if (retval < 0) {
dprintk(0, "ERROR: Firmware ping failed!\n");
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 620159d0550..2b6b283cda1 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1591,6 +1591,7 @@ static const struct cx88_board cx88_boards[] = {
.vmux = 2,
.gpio0 = 0x16d9,
}},
+ .mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PROLINK_PV_8000GT] = {
.name = "Prolink Pixelview MPEG 8000GT",
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index f1251b844e0..1c7fe6862a6 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -47,6 +47,7 @@
#include "isl6421.h"
#include "tuner-simple.h"
#include "tda9887.h"
+#include "s5h1411.h"
MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
@@ -463,6 +464,22 @@ static struct zl10353_config cx88_geniatech_x8000_mt = {
.no_tuner = 1,
};
+static struct s5h1411_config dvico_fusionhdtv7_config = {
+ .output_mode = S5H1411_SERIAL_OUTPUT,
+ .gpio = S5H1411_GPIO_ON,
+ .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+ .qam_if = S5H1411_IF_44000,
+ .vsb_if = S5H1411_IF_44000,
+ .inversion = S5H1411_INVERSION_OFF,
+ .status_mode = S5H1411_DEMODLOCKING
+};
+
+static struct xc5000_config dvico_fusionhdtv7_tuner_config = {
+ .i2c_address = 0xc2 >> 1,
+ .if_khz = 5380,
+ .tuner_callback = cx88_tuner_callback,
+};
+
static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
{
struct dvb_frontend *fe;
@@ -844,6 +861,21 @@ static int dvb_register(struct cx8802_dev *dev)
if (attach_xc3028(0x61, dev) < 0)
return -EINVAL;
break;
+ case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
+ dev->dvb.frontend = dvb_attach(s5h1411_attach,
+ &dvico_fusionhdtv7_config,
+ &dev->core->i2c_adap);
+ if (dev->dvb.frontend != NULL) {
+ /* tuner_config.video_dev must point to
+ * i2c_adap.algo_data
+ */
+ dvico_fusionhdtv7_tuner_config.priv =
+ dev->core->i2c_adap.algo_data;
+ dvb_attach(xc5000_attach, dev->dvb.frontend,
+ &dev->core->i2c_adap,
+ &dvico_fusionhdtv7_tuner_config);
+ }
+ break;
default:
printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
dev->core->name);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index f8c41d8c74c..5d837c16ee2 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -650,7 +650,7 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
GFP_KERNEL);
- if (!dev->isoc_ctl.urb) {
+ if (!dev->isoc_ctl.transfer_buffer) {
em28xx_errdev("cannot allocate memory for usbtransfer\n");
kfree(dev->isoc_ctl.urb);
return -ENOMEM;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 11c5fdedc23..7b65f5e537f 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -509,8 +509,11 @@ static int ir_probe(struct i2c_adapter *adap)
static const int probe_cx88[] = { 0x18, 0x6b, 0x71, -1 };
static const int probe_cx23885[] = { 0x6b, -1 };
const int *probe;
- struct i2c_client *c;
- unsigned char buf;
+ struct i2c_msg msg = {
+ .flags = I2C_M_RD,
+ .len = 0,
+ .buf = NULL,
+ };
int i, rc;
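+	/* A zero-length read just checks for an ACK at the probed address;
+	 * i2c_transfer() returns the number of messages completed, so a
+	 * present device yields rc == 1. */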
switch (adap->id) {
@@ -536,23 +539,17 @@ static int ir_probe(struct i2c_adapter *adap)
return 0;
}
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return -ENOMEM;
-
- c->adapter = adap;
for (i = 0; -1 != probe[i]; i++) {
- c->addr = probe[i];
- rc = i2c_master_recv(c, &buf, 0);
+ msg.addr = probe[i];
+ rc = i2c_transfer(adap, &msg, 1);
dprintk(1,"probe 0x%02x @ %s: %s\n",
probe[i], adap->name,
- (0 == rc) ? "yes" : "no");
- if (0 == rc) {
+ (1 == rc) ? "yes" : "no");
+ if (1 == rc) {
ir_attach(adap, probe[i], 0, 0);
break;
}
}
- kfree(c);
return 0;
}
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig
index a8da90f69dd..158b3d0c653 100644
--- a/drivers/media/video/pvrusb2/Kconfig
+++ b/drivers/media/video/pvrusb2/Kconfig
@@ -64,6 +64,7 @@ config VIDEO_PVRUSB2_DVB
depends on VIDEO_PVRUSB2 && DVB_CORE && EXPERIMENTAL
select DVB_LGDT330X if !DVB_FE_CUSTOMISE
select DVB_S5H1409 if !DVB_FE_CUSTOMISE
+ select DVB_S5H1411 if !DVB_FE_CUSTOMISE
select DVB_TDA10048 if !DVB_FE_CUSTOMIZE
select DVB_TDA18271 if !DVB_FE_CUSTOMIZE
select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
index 2dd06a90adc..3a141d93e1a 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
@@ -36,6 +36,7 @@ pvr2_device_desc structures.
#include "pvrusb2-hdw-internal.h"
#include "lgdt330x.h"
#include "s5h1409.h"
+#include "s5h1411.h"
#include "tda10048.h"
#include "tda18271.h"
#include "tda8290.h"
@@ -368,6 +369,15 @@ static struct s5h1409_config pvr2_s5h1409_config = {
.status_mode = S5H1409_DEMODLOCKING,
};
+static struct s5h1411_config pvr2_s5h1411_config = {
+ .output_mode = S5H1411_PARALLEL_OUTPUT,
+ .gpio = S5H1411_GPIO_OFF,
+ .vsb_if = S5H1411_IF_44000,
+ .qam_if = S5H1411_IF_4000,
+ .inversion = S5H1411_INVERSION_ON,
+ .status_mode = S5H1411_DEMODLOCKING,
+};
+
static struct tda18271_std_map hauppauge_tda18271_std_map = {
.atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3,
.if_lvl = 6, .rfagc_top = 0x37, },
@@ -390,6 +400,16 @@ static int pvr2_s5h1409_attach(struct pvr2_dvb_adapter *adap)
return -EIO;
}
+static int pvr2_s5h1411_attach(struct pvr2_dvb_adapter *adap)
+{
+ adap->fe = dvb_attach(s5h1411_attach, &pvr2_s5h1411_config,
+ &adap->channel.hdw->i2c_adap);
+ if (adap->fe)
+ return 0;
+
+ return -EIO;
+}
+
static int pvr2_tda18271_8295_attach(struct pvr2_dvb_adapter *adap)
{
dvb_attach(tda829x_attach, adap->fe,
@@ -406,6 +426,11 @@ struct pvr2_dvb_props pvr2_750xx_dvb_props = {
.frontend_attach = pvr2_s5h1409_attach,
.tuner_attach = pvr2_tda18271_8295_attach,
};
+
+struct pvr2_dvb_props pvr2_751xx_dvb_props = {
+ .frontend_attach = pvr2_s5h1411_attach,
+ .tuner_attach = pvr2_tda18271_8295_attach,
+};
#endif
static const char *pvr2_client_75xxx[] = {
@@ -454,6 +479,9 @@ static const struct pvr2_device_desc pvr2_device_751xx = {
.digital_control_scheme = PVR2_DIGITAL_SCHEME_HAUPPAUGE,
.default_std_mask = V4L2_STD_NTSC_M,
.led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
+#ifdef CONFIG_VIDEO_PVRUSB2_DVB
+ .dvb_props = &pvr2_751xx_dvb_props,
+#endif
};
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.h b/drivers/media/video/pvrusb2/pvrusb2-devattr.h
index c2e2b06fe2e..d016f8b6c70 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.h
@@ -104,28 +104,28 @@ struct pvr2_device_desc {
unsigned char digital_control_scheme;
/* If set, we don't bother trying to load cx23416 firmware. */
- int flag_skip_cx23416_firmware:1;
+ unsigned int flag_skip_cx23416_firmware:1;
/* If set, the encoder must be healthy in order for digital mode to
work (otherwise we assume that digital streaming will work even
if we fail to locate firmware for the encoder). If the device
doesn't support digital streaming then this flag has no
effect. */
- int flag_digital_requires_cx23416:1;
+ unsigned int flag_digital_requires_cx23416:1;
/* Device has a hauppauge eeprom which we can interrogate. */
- int flag_has_hauppauge_rom:1;
+ unsigned int flag_has_hauppauge_rom:1;
/* Device does not require a powerup command to be issued. */
- int flag_no_powerup:1;
+ unsigned int flag_no_powerup:1;
/* Device has a cx25840 - this enables special additional logic to
handle it. */
- int flag_has_cx25840:1;
+ unsigned int flag_has_cx25840:1;
/* Device has a wm8775 - this enables special additional logic to
ensure that it is found. */
- int flag_has_wm8775:1;
+ unsigned int flag_has_wm8775:1;
/* Device has IR hardware that can be faked into looking like a
normal Hauppauge i2c IR receiver. This is currently very
@@ -135,15 +135,15 @@ struct pvr2_device_desc {
to virtualize the presence of the non-existent IR receiver chip and
implement the virtual receiver in terms of appropriate FX2
commands. */
- int flag_has_hauppauge_custom_ir:1;
+ unsigned int flag_has_hauppauge_custom_ir:1;
/* These bits define which kinds of sources the device can handle.
Note: Digital tuner presence is inferred by the
digital_control_scheme enumeration. */
- int flag_has_fmradio:1; /* Has FM radio receiver */
- int flag_has_analogtuner:1; /* Has analog tuner */
- int flag_has_composite:1; /* Has composite input */
- int flag_has_svideo:1; /* Has s-video input */
+ unsigned int flag_has_fmradio:1; /* Has FM radio receiver */
+ unsigned int flag_has_analogtuner:1; /* Has analog tuner */
+ unsigned int flag_has_composite:1; /* Has composite input */
+ unsigned int flag_has_svideo:1; /* Has s-video input */
};
extern struct usb_device_id pvr2_device_table[];
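
The bit-field type change matters because the signedness of a plain int
bit-field is implementation-defined; where it is signed, a 1-bit field can
hold only 0 and -1, so assigning 1 stores -1 and any flag == 1 comparison
silently fails. unsigned int:1 holds 0 and 1 as intended. A standalone
illustration:

	#include <stdio.h>

	struct flags { int f:1; };	/* signedness implementation-defined */

	int main(void)
	{
		struct flags v = { .f = 1 };
		printf("%d\n", v.f);	/* prints -1 where 'int :1' is signed */
		return v.f == 1;	/* 0 on such ABIs: the test fails */
	}
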
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 529e00952a8..2b72e10e6b9 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -369,19 +369,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
break;
}
case TUNER_TEA5767:
- if (tea5767_attach(&t->fe, t->i2c->adapter, t->i2c->addr) == NULL) {
- t->type = TUNER_ABSENT;
- t->mode_mask = T_UNINITIALIZED;
- return;
- }
+ if (!tea5767_attach(&t->fe, t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
t->mode_mask = T_RADIO;
break;
case TUNER_TEA5761:
- if (tea5761_attach(&t->fe, t->i2c->adapter, t->i2c->addr) == NULL) {
- t->type = TUNER_ABSENT;
- t->mode_mask = T_UNINITIALIZED;
- return;
- }
+ if (!tea5761_attach(&t->fe, t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
t->mode_mask = T_RADIO;
break;
case TUNER_PHILIPS_FMD1216ME_MK3:
@@ -394,12 +388,9 @@ static void set_type(struct i2c_client *c, unsigned int type,
buffer[2] = 0x86;
buffer[3] = 0x54;
i2c_master_send(c, buffer, 4);
- if (simple_tuner_attach(&t->fe, t->i2c->adapter, t->i2c->addr,
- t->type) == NULL) {
- t->type = TUNER_ABSENT;
- t->mode_mask = T_UNINITIALIZED;
- return;
- }
+ if (!simple_tuner_attach(&t->fe, t->i2c->adapter, t->i2c->addr,
+ t->type))
+ goto attach_failed;
break;
case TUNER_PHILIPS_TD1316:
buffer[0] = 0x0b;
@@ -407,12 +398,9 @@ static void set_type(struct i2c_client *c, unsigned int type,
buffer[2] = 0x86;
buffer[3] = 0xa4;
i2c_master_send(c,buffer,4);
- if (simple_tuner_attach(&t->fe, t->i2c->adapter,
- t->i2c->addr, t->type) == NULL) {
- t->type = TUNER_ABSENT;
- t->mode_mask = T_UNINITIALIZED;
- return;
- }
+ if (!simple_tuner_attach(&t->fe, t->i2c->adapter,
+ t->i2c->addr, t->type))
+ goto attach_failed;
break;
case TUNER_XC2028:
{
@@ -421,40 +409,34 @@ static void set_type(struct i2c_client *c, unsigned int type,
.i2c_addr = t->i2c->addr,
.callback = t->tuner_callback,
};
- if (!xc2028_attach(&t->fe, &cfg)) {
- t->type = TUNER_ABSENT;
- t->mode_mask = T_UNINITIALIZED;
- return;
- }
+ if (!xc2028_attach(&t->fe, &cfg))
+ goto attach_failed;
break;
}
case TUNER_TDA9887:
tda9887_attach(&t->fe, t->i2c->adapter, t->i2c->addr);
break;
case TUNER_XC5000:
+ {
+ struct dvb_tuner_ops *xc_tuner_ops;
+
xc5000_cfg.i2c_address = t->i2c->addr;
xc5000_cfg.if_khz = 5380;
xc5000_cfg.priv = c->adapter->algo_data;
xc5000_cfg.tuner_callback = t->tuner_callback;
- if (!xc5000_attach(&t->fe, t->i2c->adapter, &xc5000_cfg)) {
- t->type = TUNER_ABSENT;
- t->mode_mask = T_UNINITIALIZED;
- return;
- }
- {
- struct dvb_tuner_ops *xc_tuner_ops;
+ if (!xc5000_attach(&t->fe, t->i2c->adapter, &xc5000_cfg))
+ goto attach_failed;
+
xc_tuner_ops = &t->fe.ops.tuner_ops;
- if(xc_tuner_ops->init != NULL)
+ if (xc_tuner_ops->init)
xc_tuner_ops->init(&t->fe);
- }
break;
+ }
default:
- if (simple_tuner_attach(&t->fe, t->i2c->adapter,
- t->i2c->addr, t->type) == NULL) {
- t->type = TUNER_ABSENT;
- t->mode_mask = T_UNINITIALIZED;
- return;
- }
+ if (!simple_tuner_attach(&t->fe, t->i2c->adapter,
+ t->i2c->addr, t->type))
+ goto attach_failed;
+
break;
}
@@ -476,11 +458,27 @@ static void set_type(struct i2c_client *c, unsigned int type,
if (t->mode_mask == T_UNINITIALIZED)
t->mode_mask = new_mode_mask;
- set_freq(c, (V4L2_TUNER_RADIO == t->mode) ? t->radio_freq : t->tv_freq);
+ /* xc2028/3028 and xc5000 require firmware to be set up later;
+ trying to set a frequency here will just fail.
+ FIXME: better to move set_freq to the tuner code. This is needed
+ on analog tuners for the PLL to work properly.
+ */
+ if (t->type != TUNER_XC2028 && t->type != TUNER_XC5000)
+ set_freq(c, (V4L2_TUNER_RADIO == t->mode) ?
+ t->radio_freq : t->tv_freq);
+
tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
c->adapter->name, c->driver->driver.name, c->addr << 1, type,
t->mode_mask);
tuner_i2c_address_check(t);
+ return;
+
+attach_failed:
+ tuner_dbg("Tuner attach for type = %d failed.\n", t->type);
+ t->type = TUNER_ABSENT;
+ t->mode_mask = T_UNINITIALIZED;
+
+ return;
}
/*
@@ -495,14 +493,16 @@ static void set_addr(struct i2c_client *c, struct tuner_setup *tun_setup)
{
struct tuner *t = i2c_get_clientdata(c);
- tuner_dbg("set addr for type %i\n", t->type);
-
if ( (t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) &&
(t->mode_mask & tun_setup->mode_mask))) ||
(tun_setup->addr == c->addr)) {
set_type(c, tun_setup->type, tun_setup->mode_mask,
tun_setup->config, tun_setup->tuner_callback);
- }
+ } else
+ tuner_dbg("set addr discarded for type %i, mask %x. "
+ "Asked to change tuner at addr 0x%02x, with mask %x\n",
+ t->type, t->mode_mask,
+ tun_setup->addr, tun_setup->mode_mask);
}
static inline int check_mode(struct tuner *t, char *cmd)
diff --git a/drivers/media/video/tuner-xc2028.c b/drivers/media/video/tuner-xc2028.c
index cc3db7d79a0..9e9003cffc7 100644
--- a/drivers/media/video/tuner-xc2028.c
+++ b/drivers/media/video/tuner-xc2028.c
@@ -432,7 +432,7 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
type &= type_mask;
- if (!type & SCODE)
+ if (!(type & SCODE))
type_mask = ~0;
/* Seek for exact match */
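
A one-character precedence fix: ! binds tighter than &, so the old test
parsed as (!type) & SCODE, which is 0 for every non-zero type and therefore
never set type_mask. A standalone check (the SCODE value here is
illustrative; the real flag lives in the xc2028 headers):

	#include <assert.h>

	#define SCODE 0x10	/* illustrative value */

	int main(void)
	{
		unsigned int type = 0x30;	/* SCODE bit set */
		assert((!type & SCODE) == 0);	/* old form is blind to the bit */
		assert(!(type & SCODE) == 0);	/* fixed form sees it */
		type = 0x20;			/* SCODE bit clear */
		assert(!(type & SCODE) == 1);	/* and fires as intended */
		return 0;
	}
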
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index b1e9592acb9..845be1864f6 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -888,7 +888,7 @@ static int vivi_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
struct vivi_dev *dev;
- struct vivi_fh *fh;
+ struct vivi_fh *fh = NULL;
int i;
int retval = 0;
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index fafb57fed76..0736cff9d97 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -31,7 +31,6 @@
static LIST_HEAD(container_list);
static DEFINE_MUTEX(container_list_lock);
static struct class enclosure_class;
-static struct class enclosure_component_class;
/**
* enclosure_find - find an enclosure given a device
@@ -166,6 +165,40 @@ void enclosure_unregister(struct enclosure_device *edev)
}
EXPORT_SYMBOL_GPL(enclosure_unregister);
+#define ENCLOSURE_NAME_SIZE 64
+
+static void enclosure_link_name(struct enclosure_component *cdev, char *name)
+{
+ strcpy(name, "enclosure_device:");
+ strcat(name, cdev->cdev.bus_id);
+}
+
+static void enclosure_remove_links(struct enclosure_component *cdev)
+{
+ char name[ENCLOSURE_NAME_SIZE];
+
+ enclosure_link_name(cdev, name);
+ sysfs_remove_link(&cdev->dev->kobj, name);
+ sysfs_remove_link(&cdev->cdev.kobj, "device");
+}
+
+static int enclosure_add_links(struct enclosure_component *cdev)
+{
+ int error;
+ char name[ENCLOSURE_NAME_SIZE];
+
+ error = sysfs_create_link(&cdev->cdev.kobj, &cdev->dev->kobj, "device");
+ if (error)
+ return error;
+
+ enclosure_link_name(cdev, name);
+ error = sysfs_create_link(&cdev->dev->kobj, &cdev->cdev.kobj, name);
+ if (error)
+ sysfs_remove_link(&cdev->cdev.kobj, "device");
+
+ return error;
+}
+
static void enclosure_release(struct device *cdev)
{
struct enclosure_device *edev = to_enclosure_device(cdev);
@@ -178,10 +211,15 @@ static void enclosure_component_release(struct device *dev)
{
struct enclosure_component *cdev = to_enclosure_component(dev);
- put_device(cdev->dev);
+ if (cdev->dev) {
+ enclosure_remove_links(cdev);
+ put_device(cdev->dev);
+ }
put_device(dev->parent);
}
+static struct attribute_group *enclosure_groups[];
+
/**
* enclosure_component_register - add a particular component to an enclosure
* @edev: the enclosure to add the component
@@ -217,12 +255,14 @@ enclosure_component_register(struct enclosure_device *edev,
ecomp->number = number;
cdev = &ecomp->cdev;
cdev->parent = get_device(&edev->edev);
- cdev->class = &enclosure_component_class;
if (name)
snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name);
else
snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number);
+ cdev->release = enclosure_component_release;
+ cdev->groups = enclosure_groups;
+
err = device_register(cdev);
if (err)
ERR_PTR(err);
@@ -255,10 +295,12 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
cdev = &edev->component[component];
- device_del(&cdev->cdev);
+ if (cdev->dev)
+ enclosure_remove_links(cdev);
+
put_device(cdev->dev);
cdev->dev = get_device(dev);
- return device_add(&cdev->cdev);
+ return enclosure_add_links(cdev);
}
EXPORT_SYMBOL_GPL(enclosure_add_device);
@@ -442,24 +484,32 @@ static ssize_t get_component_type(struct device *cdev,
}
-static struct device_attribute enclosure_component_attrs[] = {
- __ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
- set_component_fault),
- __ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
- set_component_status),
- __ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
- set_component_active),
- __ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
- set_component_locate),
- __ATTR(type, S_IRUGO, get_component_type, NULL),
- __ATTR_NULL
+static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
+ set_component_fault);
+static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
+ set_component_status);
+static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
+ set_component_active);
+static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
+ set_component_locate);
+static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);
+
+static struct attribute *enclosure_component_attrs[] = {
+ &dev_attr_fault.attr,
+ &dev_attr_status.attr,
+ &dev_attr_active.attr,
+ &dev_attr_locate.attr,
+ &dev_attr_type.attr,
+ NULL
};
-static struct class enclosure_component_class = {
- .name = "enclosure_component",
- .owner = THIS_MODULE,
- .dev_attrs = enclosure_component_attrs,
- .dev_release = enclosure_component_release,
+static struct attribute_group enclosure_group = {
+ .attrs = enclosure_component_attrs,
+};
+
+static struct attribute_group *enclosure_groups[] = {
+ &enclosure_group,
+ NULL
};
static int __init enclosure_init(void)
@@ -469,20 +519,12 @@ static int __init enclosure_init(void)
err = class_register(&enclosure_class);
if (err)
return err;
- err = class_register(&enclosure_component_class);
- if (err)
- goto err_out;
return 0;
- err_out:
- class_unregister(&enclosure_class);
-
- return err;
}
static void __exit enclosure_exit(void)
{
- class_unregister(&enclosure_component_class);
class_unregister(&enclosure_class);
}
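
The conversion replaces a whole device class with default attribute groups:
pointing cdev->groups at a NULL-terminated list of attribute_group before
device_register() makes the driver core create and remove the sysfs files
itself, so enclosure_component_class and its dev_attrs table can go. The
generic shape, with illustrative names:

	static DEVICE_ATTR(foo, S_IRUGO, foo_show, NULL);

	static struct attribute *foo_attrs[] = {
		&dev_attr_foo.attr,
		NULL
	};

	static struct attribute_group foo_group = {
		.attrs = foo_attrs,
	};

	static struct attribute_group *foo_groups[] = {
		&foo_group,
		NULL
	};

	/* ... then, before device_register(dev): */
	dev->release = foo_release;
	dev->groups  = foo_groups;
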
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0697aa8ea77..8082c1d142d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2011,7 +2011,7 @@ config E1000_DISABLE_PACKET_SPLIT
config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
- depends on PCI
+ depends on PCI && (!SPARC32 || BROKEN)
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 75ef9d0d974..f9d6b4dca18 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -196,3 +196,160 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
+
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
+{
+ struct mlx4_db_pgdir *pgdir;
+
+ pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
+ if (!pgdir)
+ return NULL;
+
+ bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
+ pgdir->bits[0] = pgdir->order0;
+ pgdir->bits[1] = pgdir->order1;
+ pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
+ &pgdir->db_dma, GFP_KERNEL);
+ if (!pgdir->db_page) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ return pgdir;
+}
+
+static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
+ struct mlx4_db *db, int order)
+{
+ int o;
+ int i;
+
+ for (o = order; o <= 1; ++o) {
+ i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
+ if (i < MLX4_DB_PER_PAGE >> o)
+ goto found;
+ }
+
+ return -ENOMEM;
+
+found:
+ clear_bit(i, pgdir->bits[o]);
+
+ i <<= o;
+
+ if (o > order)
+ set_bit(i ^ 1, pgdir->bits[order]);
+
+ db->u.pgdir = pgdir;
+ db->index = i;
+ db->db = pgdir->db_page + db->index;
+ db->dma = pgdir->db_dma + db->index * 4;
+ db->order = order;
+
+ return 0;
+}
+
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_db_pgdir *pgdir;
+ int ret = 0;
+
+ mutex_lock(&priv->pgdir_mutex);
+
+ list_for_each_entry(pgdir, &priv->pgdir_list, list)
+ if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
+ goto out;
+
+ pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
+ if (!pgdir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&pgdir->list, &priv->pgdir_list);
+
+ /* This should never fail -- we just allocated an empty page: */
+ WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
+
+out:
+ mutex_unlock(&priv->pgdir_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_db_alloc);
+
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int o;
+ int i;
+
+ mutex_lock(&priv->pgdir_mutex);
+
+ o = db->order;
+ i = db->index;
+
+ if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
+ clear_bit(i ^ 1, db->u.pgdir->order0);
+ ++o;
+ }
+ i >>= o;
+ set_bit(i, db->u.pgdir->bits[o]);
+
+ if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ db->u.pgdir->db_page, db->u.pgdir->db_dma);
+ list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir);
+ }
+
+ mutex_unlock(&priv->pgdir_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_db_free);
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+ int size, int max_direct)
+{
+ int err;
+
+ err = mlx4_db_alloc(dev, &wqres->db, 1);
+ if (err)
+ return err;
+
+ *wqres->db.db = 0;
+
+ err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
+ if (err)
+ goto err_db;
+
+ err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
+ &wqres->mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
+ if (err)
+ goto err_mtt;
+
+ return 0;
+
+err_mtt:
+ mlx4_mtt_cleanup(dev, &wqres->mtt);
+err_buf:
+ mlx4_buf_free(dev, size, &wqres->buf);
+err_db:
+ mlx4_db_free(dev, &wqres->db);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
+
+void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+ int size)
+{
+ mlx4_mtt_cleanup(dev, &wqres->mtt);
+ mlx4_buf_free(dev, size, &wqres->buf);
+ mlx4_db_free(dev, &wqres->db);
+}
+EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
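
The doorbell allocator above is a two-level buddy scheme inside one
DMA-coherent page. Assuming MLX4_DB_PER_PAGE is PAGE_SIZE / 4 (the 4-byte
stride in db->dma = pgdir->db_dma + db->index * 4 implies 4-byte doorbells,
i.e. 1024 per 4 KiB page), order 0 hands out single doorbells and order 1
hands out aligned pairs. A worked first allocation:

	/* Fresh pgdir: order0 bitmap all clear, order1 all set (every pair
	 * free).  First order-0 request: nothing in order0, so bit 0 of
	 * order1 is taken (o == 1), i becomes 0 << 1 == 0, and because
	 * o > order the buddy bit (0 ^ 1 == 1) is set in order0.  The caller
	 * gets index 0; index 1 stays free for the next order-0 request.
	 * mlx4_db_free() reverses this: if the buddy of an order-0 doorbell
	 * is free, the pair recombines into order1, and once order1 is full
	 * again the page itself goes back via dma_free_coherent(). */
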
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index caa5bcf54e3..6fda0af9d0a 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -180,7 +180,7 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
cq_context->mtt_base_addr_h = mtt_addr >> 32;
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
- err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
+ err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 49a4acab5e8..a6aa49fc1d6 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -798,6 +798,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ INIT_LIST_HEAD(&priv->pgdir_list);
+ mutex_init(&priv->pgdir_mutex);
+
/*
* Now reset the HCA before we touch the PCI capabilities or
* attempt a firmware command, since a boot ROM may have left
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 73336810e65..a4023c2dd05 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -257,6 +257,9 @@ struct mlx4_priv {
struct list_head ctx_list;
spinlock_t ctx_lock;
+ struct list_head pgdir_list;
+ struct mutex pgdir_mutex;
+
struct mlx4_fw fw;
struct mlx4_cmd cmd;
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index fa24e659759..ee5484c44a1 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -299,3 +299,34 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);
+int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ struct mlx4_qp_context *context,
+ struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
+{
+ int err;
+ int i;
+ enum mlx4_qp_state states[] = {
+ MLX4_QP_STATE_RST,
+ MLX4_QP_STATE_INIT,
+ MLX4_QP_STATE_RTR,
+ MLX4_QP_STATE_RTS
+ };
+
+ for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
+ context->flags &= cpu_to_be32(~(0xf << 28));
+ context->flags |= cpu_to_be32(states[i + 1] << 28);
+ err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
+ context, 0, 0, qp);
+ if (err) {
+ mlx4_err(dev, "Failed to bring QP to state: "
+ "%d with error: %d\n",
+ states[i + 1], err);
+ return err;
+ }
+
+ *qp_state = states[i + 1];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
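
mlx4_qp_to_ready() walks the firmware QP state machine one hop at a time;
bits 31:28 of context->flags carry the target state for each transition,
hence the mask-and-or with ~(0xf << 28). A hedged usage sketch (context and
MTT setup elided):

	enum mlx4_qp_state state;
	int err = mlx4_qp_to_ready(dev, &wqres->mtt, &context, &qp, &state);

	/* on success, state == MLX4_QP_STATE_RTS; on failure it holds the
	 * last state actually reached (untouched if the first hop failed) */
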
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 5a888704a8d..4f4e7cf105d 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -5,7 +5,7 @@
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
obj-y += s390mach.o sysinfo.o s390_rdev.o
-obj-y += cio/ block/ char/ crypto/ net/ scsi/
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile
new file mode 100644
index 00000000000..4a5ec39f9ca
--- /dev/null
+++ b/drivers/s390/kvm/Makefile
@@ -0,0 +1,9 @@
+# Makefile for kvm guest drivers on s390
+#
+# Copyright IBM Corp. 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (version 2 only)
+# as published by the Free Software Foundation.
+
+obj-$(CONFIG_VIRTIO) += kvm_virtio.o
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
new file mode 100644
index 00000000000..bbef3764fbf
--- /dev/null
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -0,0 +1,338 @@
+/*
+ * kvm_virtio.c - virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <asm/io.h>
+#include <asm/kvm_para.h>
+#include <asm/kvm_virtio.h>
+#include <asm/setup.h>
+#include <asm/s390_ext.h>
+
+#define VIRTIO_SUBCODE_64 0x0D00
+
+/*
+ * The pointer to our page of device descriptions.
+ */
+static void *kvm_devices;
+
+/*
+ * Unique numbering for kvm devices.
+ */
+static unsigned int dev_index;
+
+struct kvm_device {
+ struct virtio_device vdev;
+ struct kvm_device_desc *desc;
+};
+
+#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
+
+/*
+ * memory layout:
+ * - kvm_device_descriptor
+ * struct kvm_device_desc
+ * - configuration
+ * struct kvm_vqconfig
+ * - feature bits
+ * - config space
+ */
+static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
+{
+ return (struct kvm_vqconfig *)(desc + 1);
+}
+
+static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
+{
+ return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
+}
+
+static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
+{
+ return kvm_vq_features(desc) + desc->feature_len * 2;
+}
+
+/*
+ * The total size of the config page used by this device (incl. desc)
+ */
+static unsigned desc_size(const struct kvm_device_desc *desc)
+{
+ return sizeof(*desc)
+ + desc->num_vq * sizeof(struct kvm_vqconfig)
+ + desc->feature_len * 2
+ + desc->config_len;
+}
+
+/*
+ * This tests (and acknowledges) a feature bit.
+ */
+static bool kvm_feature(struct virtio_device *vdev, unsigned fbit)
+{
+ struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+ u8 *features;
+
+ if (fbit / 8 > desc->feature_len)
+ return false;
+
+ features = kvm_vq_features(desc);
+ if (!(features[fbit / 8] & (1 << (fbit % 8))))
+ return false;
+
+ /*
+ * We set the matching bit in the other half of the bitmap to tell the
+ * Host we want to use this feature.
+ */
+ features[desc->feature_len + fbit / 8] |= (1 << (fbit % 8));
+ return true;
+}
+
+/*
+ * Reading and writing elements in config space
+ */
+static void kvm_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned len)
+{
+ struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+ BUG_ON(offset + len > desc->config_len);
+ memcpy(buf, kvm_vq_configspace(desc) + offset, len);
+}
+
+static void kvm_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned len)
+{
+ struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+ BUG_ON(offset + len > desc->config_len);
+ memcpy(kvm_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access
+ * the status field of the device descriptor. set_status will also
+ * make a hypercall to the host to tell it about status changes
+ */
+static u8 kvm_get_status(struct virtio_device *vdev)
+{
+ return to_kvmdev(vdev)->desc->status;
+}
+
+static void kvm_set_status(struct virtio_device *vdev, u8 status)
+{
+ BUG_ON(!status);
+ to_kvmdev(vdev)->desc->status = status;
+ kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
+ (unsigned long) to_kvmdev(vdev)->desc);
+}
+
+/*
+ * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
+ * descriptor address. The Host will zero the status and all the
+ * features.
+ */
+static void kvm_reset(struct virtio_device *vdev)
+{
+ kvm_hypercall1(KVM_S390_VIRTIO_RESET,
+ (unsigned long) to_kvmdev(vdev)->desc);
+}
+
+/*
+ * When the virtio_ring code wants to notify the Host, it calls us here and we
+ * make a hypercall. We hand the address of the virtqueue so the Host
+ * knows which virtqueue we're talking about.
+ */
+static void kvm_notify(struct virtqueue *vq)
+{
+ struct kvm_vqconfig *config = vq->priv;
+
+ kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
+}
+
+/*
+ * This routine finds the first virtqueue described in the configuration of
+ * this device and sets it up.
+ */
+static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
+ unsigned index,
+ void (*callback)(struct virtqueue *vq))
+{
+ struct kvm_device *kdev = to_kvmdev(vdev);
+ struct kvm_vqconfig *config;
+ struct virtqueue *vq;
+ int err;
+
+ if (index >= kdev->desc->num_vq)
+ return ERR_PTR(-ENOENT);
+
+ config = kvm_vq_config(kdev->desc)+index;
+
+ if (add_shared_memory(config->address,
+ vring_size(config->num, PAGE_SIZE))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ vq = vring_new_virtqueue(config->num, vdev, (void *) config->address,
+ kvm_notify, callback);
+ if (!vq) {
+ err = -ENOMEM;
+ goto unmap;
+ }
+
+ /*
+ * register a callback token
+ * The host will send this via the external interrupt parameter
+ */
+ config->token = (u64) vq;
+
+ vq->priv = config;
+ return vq;
+unmap:
+ remove_shared_memory(config->address, vring_size(config->num,
+ PAGE_SIZE));
+out:
+ return ERR_PTR(err);
+}
+
+static void kvm_del_vq(struct virtqueue *vq)
+{
+ struct kvm_vqconfig *config = vq->priv;
+
+ vring_del_virtqueue(vq);
+ remove_shared_memory(config->address,
+ vring_size(config->num, PAGE_SIZE));
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static struct virtio_config_ops kvm_vq_configspace_ops = {
+ .feature = kvm_feature,
+ .get = kvm_get,
+ .set = kvm_set,
+ .get_status = kvm_get_status,
+ .set_status = kvm_set_status,
+ .reset = kvm_reset,
+ .find_vq = kvm_find_vq,
+ .del_vq = kvm_del_vq,
+};
+
+/*
+ * The root device for the kvm virtio devices.
+ * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
+ */
+static struct device kvm_root = {
+ .parent = NULL,
+ .bus_id = "kvm_s390",
+};
+
+/*
+ * adds a new device and registers it with virtio;
+ * appropriate drivers are loaded by the device model
+ */
+static void add_kvm_device(struct kvm_device_desc *d)
+{
+ struct kvm_device *kdev;
+
+ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ if (!kdev) {
+ printk(KERN_EMERG "Cannot allocate kvm dev %u\n",
+ dev_index++);
+ return;
+ }
+
+ kdev->vdev.dev.parent = &kvm_root;
+ kdev->vdev.index = dev_index++;
+ kdev->vdev.id.device = d->type;
+ kdev->vdev.config = &kvm_vq_configspace_ops;
+ kdev->desc = d;
+
+ if (register_virtio_device(&kdev->vdev) != 0) {
+ printk(KERN_ERR "Failed to register kvm device %u\n",
+ kdev->vdev.index);
+ kfree(kdev);
+ }
+}
+
+/*
+ * scan_devices() simply iterates through the device page.
+ * The type 0 is reserved to mean "end of devices".
+ */
+static void scan_devices(void)
+{
+ unsigned int i;
+ struct kvm_device_desc *d;
+
+ for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
+ d = kvm_devices + i;
+
+ if (d->type == 0)
+ break;
+
+ add_kvm_device(d);
+ }
+}
+
+/*
+ * we emulate the request_irq behaviour on top of s390 extints
+ */
+static void kvm_extint_handler(u16 code)
+{
+ void *data = (void *) *(long *) __LC_PFAULT_INTPARM;
+ u16 subcode = S390_lowcore.cpu_addr;
+
+ if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
+ return;
+
+ vring_interrupt(0, data);
+}
+
+/*
+ * Init function for virtio
+ * devices are in a single page above top of "normal" mem
+ */
+static int __init kvm_devices_init(void)
+{
+ int rc;
+
+ if (!MACHINE_IS_KVM)
+ return -ENODEV;
+
+ rc = device_register(&kvm_root);
+ if (rc) {
+ printk(KERN_ERR "Could not register kvm_s390 root device");
+ return rc;
+ }
+
+ if (add_shared_memory((max_pfn) << PAGE_SHIFT, PAGE_SIZE)) {
+ device_unregister(&kvm_root);
+ return -ENOMEM;
+ }
+
+ kvm_devices = (void *) (max_pfn << PAGE_SHIFT);
+
+ ctl_set_bit(0, 9);
+ register_external_interrupt(0x2603, kvm_extint_handler);
+
+ scan_devices();
+ return 0;
+}
+
+/*
+ * We do this after core stuff, but before the drivers.
+ */
+postcore_initcall(kvm_devices_init);
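
A worked desc_size() example for a hypothetical descriptor with num_vq == 2,
feature_len == 2 and config_len == 8 (the struct sizes themselves are
target-dependent):

	/*   sizeof(struct kvm_device_desc)
	 * + 2 * sizeof(struct kvm_vqconfig)	ring descriptions
	 * + 2 * 2				feature bytes: guest half
	 *					plus the host-ack half
	 * + 8					config space
	 *
	 * scan_devices() advances through the page in exactly these strides
	 * until it reads a descriptor whose type is 0.
	 */
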
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 7c3f02816e9..9af2330f07a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1927,7 +1927,8 @@ zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
/* setup new FSF request */
retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
- 0, NULL, &lock_flags, &fsf_req);
+ ZFCP_WAIT_FOR_SBAL, NULL, &lock_flags,
+ &fsf_req);
if (retval) {
ZFCP_LOG_INFO("error: Could not create exchange configuration "
"data request for adapter %s.\n",
@@ -2035,21 +2036,21 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
min(FC_SERIAL_NUMBER_SIZE, 17));
}
- ZFCP_LOG_NORMAL("The adapter %s reported the following "
- "characteristics:\n"
- "WWNN 0x%016Lx, "
- "WWPN 0x%016Lx, "
- "S_ID 0x%06x,\n"
- "adapter version 0x%x, "
- "LIC version 0x%x, "
- "FC link speed %d Gb/s\n",
- zfcp_get_busid_by_adapter(adapter),
- (wwn_t) fc_host_node_name(shost),
- (wwn_t) fc_host_port_name(shost),
- fc_host_port_id(shost),
- adapter->hydra_version,
- adapter->fsf_lic_version,
- fc_host_speed(shost));
+ if (fsf_req->erp_action)
+ ZFCP_LOG_NORMAL("The adapter %s reported the following "
+ "characteristics:\n"
+ "WWNN 0x%016Lx, WWPN 0x%016Lx, "
+ "S_ID 0x%06x,\n"
+ "adapter version 0x%x, "
+ "LIC version 0x%x, "
+ "FC link speed %d Gb/s\n",
+ zfcp_get_busid_by_adapter(adapter),
+ (wwn_t) fc_host_node_name(shost),
+ (wwn_t) fc_host_port_name(shost),
+ fc_host_port_id(shost),
+ adapter->hydra_version,
+ adapter->fsf_lic_version,
+ fc_host_speed(shost));
if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
ZFCP_LOG_NORMAL("error: the adapter %s "
"only supports newer control block "
@@ -2114,8 +2115,10 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req);
return -EIO;
case FC_PORTTYPE_NPORT:
- ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
- "network detected at adapter %s.\n",
+ if (fsf_req->erp_action)
+ ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
+ "network detected at adapter "
+ "%s.\n",
zfcp_get_busid_by_adapter(adapter));
break;
default:
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 8cce5cc11d5..099970b2700 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -213,6 +213,7 @@
#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
+#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
/* host connection features */
#define FSF_FEATURE_NPIV_MODE 0x00000001
@@ -340,6 +341,15 @@ struct fsf_qtcb_prefix {
u8 res1[20];
} __attribute__ ((packed));
+struct fsf_statistics_info {
+ u64 input_req;
+ u64 output_req;
+ u64 control_req;
+ u64 input_mb;
+ u64 output_mb;
+ u64 seconds_act;
+} __attribute__ ((packed));
+
union fsf_status_qual {
u8 byte[FSF_STATUS_QUALIFIER_SIZE];
u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
@@ -436,7 +446,8 @@ struct fsf_qtcb_bottom_config {
u32 hardware_version;
u8 serial_number[32];
struct fsf_nport_serv_param plogi_payload;
- u8 res4[160];
+ struct fsf_statistics_info stat_info;
+ u8 res4[112];
} __attribute__ ((packed));
struct fsf_qtcb_bottom_port {
@@ -469,7 +480,10 @@ struct fsf_qtcb_bottom_port {
u64 control_requests;
u64 input_mb; /* where 1 MByte == 1.000.000 Bytes */
u64 output_mb; /* where 1 MByte == 1.000.000 Bytes */
- u8 res2[256];
+ u8 cp_util;
+ u8 cb_util;
+ u8 a_util;
+ u8 res2[253];
} __attribute__ ((packed));
union fsf_qtcb_bottom {
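
The reserved fields shrink by exactly what moves in, so both QTCB bottom
layouts, an interface shared with the adapter firmware, keep their overall
size: struct fsf_statistics_info is six u64s, 48 bytes, and 160 - 48 = 112;
likewise the three u8 utilization counters take 3 bytes out of res2, and
256 - 3 = 253.
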
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f81850624ee..01687559dc0 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -40,6 +40,7 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
unsigned int, unsigned int);
static struct device_attribute *zfcp_sysfs_sdev_attrs[];
+static struct device_attribute *zfcp_a_stats_attrs[];
struct zfcp_data zfcp_data = {
.scsi_host_template = {
@@ -61,6 +62,7 @@ struct zfcp_data zfcp_data = {
.use_clustering = 1,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
.max_sectors = ZFCP_MAX_SECTORS,
+ .shost_attrs = zfcp_a_stats_attrs,
},
.driver_version = ZFCP_VERSION,
};
@@ -809,4 +811,116 @@ static struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
NULL
};
+static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *scsi_host = dev_to_shost(dev);
+ struct fsf_qtcb_bottom_port *qtcb_port;
+ int retval;
+ struct zfcp_adapter *adapter;
+
+ adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+ if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+ return -EOPNOTSUPP;
+
+ qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
+ if (!qtcb_port)
+ return -ENOMEM;
+
+ retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
+ if (!retval)
+ retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
+ qtcb_port->cb_util, qtcb_port->a_util);
+ kfree(qtcb_port);
+ return retval;
+}
+
+static int zfcp_sysfs_adapter_ex_config(struct device *dev,
+ struct fsf_statistics_info *stat_inf)
+{
+ int retval;
+ struct fsf_qtcb_bottom_config *qtcb_config;
+ struct Scsi_Host *scsi_host = dev_to_shost(dev);
+ struct zfcp_adapter *adapter;
+
+ adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+ if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+ return -EOPNOTSUPP;
+
+ qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
+ GFP_KERNEL);
+ if (!qtcb_config)
+ return -ENOMEM;
+
+ retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
+ if (!retval)
+ *stat_inf = qtcb_config->stat_info;
+
+ kfree(qtcb_config);
+ return retval;
+}
+
+static ssize_t zfcp_sysfs_adapter_request_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsf_statistics_info stat_info;
+ int retval;
+
+ retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
+ if (retval)
+ return retval;
+
+ return sprintf(buf, "%llu %llu %llu\n",
+ (unsigned long long) stat_info.input_req,
+ (unsigned long long) stat_info.output_req,
+ (unsigned long long) stat_info.control_req);
+}
+
+static ssize_t zfcp_sysfs_adapter_mb_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsf_statistics_info stat_info;
+ int retval;
+
+ retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
+ if (retval)
+ return retval;
+
+ return sprintf(buf, "%llu %llu\n",
+ (unsigned long long) stat_info.input_mb,
+ (unsigned long long) stat_info.output_mb);
+}
+
+static ssize_t zfcp_sysfs_adapter_sec_active_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsf_statistics_info stat_info;
+ int retval;
+
+ retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
+ if (retval)
+ return retval;
+
+ return sprintf(buf, "%llu\n",
+ (unsigned long long) stat_info.seconds_act);
+}
+
+static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
+static DEVICE_ATTR(requests, S_IRUGO, zfcp_sysfs_adapter_request_show, NULL);
+static DEVICE_ATTR(megabytes, S_IRUGO, zfcp_sysfs_adapter_mb_show, NULL);
+static DEVICE_ATTR(seconds_active, S_IRUGO,
+ zfcp_sysfs_adapter_sec_active_show, NULL);
+
+static struct device_attribute *zfcp_a_stats_attrs[] = {
+ &dev_attr_utilization,
+ &dev_attr_requests,
+ &dev_attr_megabytes,
+ &dev_attr_seconds_active,
+ NULL
+};
+
#undef ZFCP_LOG_AREA
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index b374e457e5e..b898d382b7b 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1499,7 +1499,7 @@ static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb)
thisCard = ((struct sccb_card *)pCurrCard)->cardIndex;
ioport = ((struct sccb_card *)pCurrCard)->ioPort;
- if ((p_Sccb->TargID > MAX_SCSI_TAR) || (p_Sccb->Lun > MAX_LUN)) {
+ if ((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN)) {
p_Sccb->HostStatus = SCCB_COMPLETE;
p_Sccb->SccbStatus = SCCB_ERROR;
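
The comparison fix is an off-by-one: an array sized MAX_SCSI_TAR has valid
indices 0 through MAX_SCSI_TAR - 1, so the old > test let
TargID == MAX_SCSI_TAR (and Lun == MAX_LUN) index one element past the end;
>= rejects both boundary values.
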
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7f78e3ea517..99c57b0c1d5 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1677,6 +1677,16 @@ config MAC_SCSI
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
+config SCSI_MAC_ESP
+ tristate "Macintosh NCR53c9[46] SCSI"
+ depends on MAC && SCSI
+ help
+ This is the NCR 53c9x SCSI controller found on most of the 68040
+ based Macintoshes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mac_esp.
+
config MVME147_SCSI
bool "WD33C93 SCSI driver for MVME147"
depends on MVME147 && SCSI=y
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 23e6ecbd477..6c775e350c9 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o
obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o
obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
+obj-$(CONFIG_SCSI_MAC_ESP) += esp_scsi.o mac_esp.o
obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 6ccdc96cc48..a09b2d3fdf5 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1432,15 +1432,10 @@ static void run(struct work_struct *work)
*/
static irqreturn_t intr(int irqno, void *dev_id)
{
- struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id;
+ struct Scsi_Host *shpnt = dev_id;
unsigned long flags;
unsigned char rev, dmacntrl0;
- if (!shpnt) {
- printk(KERN_ERR "aha152x: catched interrupt %d for unknown controller.\n", irqno);
- return IRQ_NONE;
- }
-
/*
* Read a couple of registers that are known to not be all 1's. If
* we read all 1's (-1), that means that either:
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 5a1471c370f..80594947c6f 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -153,8 +153,6 @@ struct aha1542_hostdata {
#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata)
-static struct Scsi_Host *aha_host[7]; /* One for each IRQ level (9-15) */
-
static DEFINE_SPINLOCK(aha1542_lock);
@@ -163,8 +161,7 @@ static DEFINE_SPINLOCK(aha1542_lock);
static void setup_mailboxes(int base_io, struct Scsi_Host *shpnt);
static int aha1542_restart(struct Scsi_Host *shost);
-static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id);
-static irqreturn_t do_aha1542_intr_handle(int irq, void *dev_id);
+static void aha1542_intr_handle(struct Scsi_Host *shost);
#define aha1542_intr_reset(base) outb(IRST, CONTROL(base))
@@ -404,23 +401,19 @@ fail:
}
/* A quick wrapper for do_aha1542_intr_handle to grab the spin lock */
-static irqreturn_t do_aha1542_intr_handle(int irq, void *dev_id)
+static irqreturn_t do_aha1542_intr_handle(int dummy, void *dev_id)
{
unsigned long flags;
- struct Scsi_Host *shost;
-
- shost = aha_host[irq - 9];
- if (!shost)
- panic("Splunge!");
+ struct Scsi_Host *shost = dev_id;
spin_lock_irqsave(shost->host_lock, flags);
- aha1542_intr_handle(shost, dev_id);
+ aha1542_intr_handle(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
return IRQ_HANDLED;
}
/* A "high" level interrupt handler */
-static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id)
+static void aha1542_intr_handle(struct Scsi_Host *shost)
{
void (*my_done) (Scsi_Cmnd *) = NULL;
int errstatus, mbi, mbo, mbistatus;
@@ -1197,7 +1190,8 @@ fail:
DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level));
spin_lock_irqsave(&aha1542_lock, flags);
- if (request_irq(irq_level, do_aha1542_intr_handle, 0, "aha1542", NULL)) {
+ if (request_irq(irq_level, do_aha1542_intr_handle, 0,
+ "aha1542", shpnt)) {
printk(KERN_ERR "Unable to allocate IRQ for adaptec controller.\n");
spin_unlock_irqrestore(&aha1542_lock, flags);
goto unregister;
@@ -1205,7 +1199,7 @@ fail:
if (dma_chan != 0xFF) {
if (request_dma(dma_chan, "aha1542")) {
printk(KERN_ERR "Unable to allocate DMA channel for Adaptec.\n");
- free_irq(irq_level, NULL);
+ free_irq(irq_level, shpnt);
spin_unlock_irqrestore(&aha1542_lock, flags);
goto unregister;
}
@@ -1214,7 +1208,7 @@ fail:
enable_dma(dma_chan);
}
}
- aha_host[irq_level - 9] = shpnt;
+
shpnt->this_id = scsi_id;
shpnt->unique_id = base_io;
shpnt->io_port = base_io;
@@ -1276,7 +1270,7 @@ unregister:
static int aha1542_release(struct Scsi_Host *shost)
{
if (shost->irq)
- free_irq(shost->irq, NULL);
+ free_irq(shost->irq, shost);
if (shost->dma_channel != 0xff)
free_dma(shost->dma_channel);
if (shost->io_port && shost->n_io_port)
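
Passing shpnt as the dev_id cookie lets the handler recover its host from
its second argument, and free_irq() is matched with the same pointer, which
is what makes the IRQ-indexed aha_host[] table and the irq - 9 arithmetic
unnecessary. The pairing in brief:

	if (request_irq(irq_level, do_aha1542_intr_handle, 0, "aha1542", shpnt))
		goto unregister;		/* shpnt rides along as dev_id */
	/* ... */
	free_irq(shost->irq, shost);		/* same cookie back */
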
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 2f00467b6b8..be5558ab84e 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -815,7 +815,7 @@ struct ahd_tmode_tstate {
struct ahd_phase_table_entry {
uint8_t phase;
uint8_t mesg_out; /* Message response to parity errors */
- char *phasemsg;
+ const char *phasemsg;
};
/************************** Serial EEPROM Format ******************************/
@@ -1314,7 +1314,7 @@ typedef int (ahd_device_setup_t)(struct ahd_softc *);
struct ahd_pci_identity {
uint64_t full_id;
uint64_t id_mask;
- char *name;
+ const char *name;
ahd_device_setup_t *setup;
};
@@ -1322,7 +1322,7 @@ struct ahd_pci_identity {
struct aic7770_identity {
uint32_t full_id;
uint32_t id_mask;
- char *name;
+ const char *name;
ahd_device_setup_t *setup;
};
extern struct aic7770_identity aic7770_ident_table [];
@@ -1333,12 +1333,11 @@ extern const int ahd_num_aic7770_devs;
/*************************** Function Declarations ****************************/
/******************************************************************************/
-void ahd_reset_cmds_pending(struct ahd_softc *ahd);
/***************************** PCI Front End *********************************/
-struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
+const struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
int ahd_pci_config(struct ahd_softc *,
- struct ahd_pci_identity *);
+ const struct ahd_pci_identity *);
int ahd_pci_test_register_access(struct ahd_softc *);
#ifdef CONFIG_PM
void ahd_pci_suspend(struct ahd_softc *);
@@ -1376,16 +1375,6 @@ int ahd_write_flexport(struct ahd_softc *ahd,
int ahd_read_flexport(struct ahd_softc *ahd, u_int addr,
uint8_t *value);
-/*************************** Interrupt Services *******************************/
-void ahd_run_qoutfifo(struct ahd_softc *ahd);
-#ifdef AHD_TARGET_MODE
-void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
-#endif
-void ahd_handle_hwerrint(struct ahd_softc *ahd);
-void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
-void ahd_handle_scsiint(struct ahd_softc *ahd,
- u_int intstat);
-
/***************************** Error Recovery *********************************/
typedef enum {
SEARCH_COMPLETE,
@@ -1479,7 +1468,7 @@ extern uint32_t ahd_debug;
void ahd_print_devinfo(struct ahd_softc *ahd,
struct ahd_devinfo *devinfo);
void ahd_dump_card_state(struct ahd_softc *ahd);
-int ahd_print_register(ahd_reg_parse_entry_t *table,
+int ahd_print_register(const ahd_reg_parse_entry_t *table,
u_int num_entries,
const char *name,
u_int address,
diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg
index be14e2ecb8f..cca16fc5b4a 100644
--- a/drivers/scsi/aic7xxx/aic79xx.reg
+++ b/drivers/scsi/aic7xxx/aic79xx.reg
@@ -198,6 +198,7 @@ register SEQINTCODE {
register CLRINT {
address 0x003
access_mode WO
+ count 19
field CLRHWERRINT 0x80 /* Rev B or greater */
field CLRBRKADRINT 0x40
field CLRSWTMINT 0x20
@@ -245,6 +246,7 @@ register CLRERR {
register HCNTRL {
address 0x005
access_mode RW
+ count 12
field SEQ_RESET 0x80 /* Rev B or greater */
field POWRDN 0x40
field SWINT 0x10
@@ -262,6 +264,7 @@ register HNSCB_QOFF {
address 0x006
access_mode RW
size 2
+ count 2
}
/*
@@ -270,6 +273,7 @@ register HNSCB_QOFF {
register HESCB_QOFF {
address 0x008
access_mode RW
+ count 2
}
/*
@@ -287,6 +291,7 @@ register HS_MAILBOX {
*/
register SEQINTSTAT {
address 0x00C
+ count 1
access_mode RO
field SEQ_SWTMRTO 0x10
field SEQ_SEQINT 0x08
@@ -332,6 +337,7 @@ register SNSCB_QOFF {
*/
register SESCB_QOFF {
address 0x012
+ count 2
access_mode RW
modes M_CCHAN
}
@@ -397,6 +403,7 @@ register DFCNTRL {
address 0x019
access_mode RW
modes M_DFF0, M_DFF1
+ count 11
field PRELOADEN 0x80
field SCSIENWRDIS 0x40 /* Rev B only. */
field SCSIEN 0x20
@@ -415,6 +422,7 @@ register DFCNTRL {
*/
register DSCOMMAND0 {
address 0x019
+ count 1
access_mode RW
modes M_CFG
field CACHETHEN 0x80 /* Cache Threshold enable */
@@ -580,6 +588,7 @@ register DFF_THRSH {
address 0x088
access_mode RW
modes M_CFG
+ count 1
field WR_DFTHRSH 0x70 {
WR_DFTHRSH_MIN,
WR_DFTHRSH_25,
@@ -800,6 +809,7 @@ register PCIXCTL {
address 0x093
access_mode RW
modes M_CFG
+ count 1
field SERRPULSE 0x80
field UNEXPSCIEN 0x20
field SPLTSMADIS 0x10
@@ -844,6 +854,7 @@ register DCHSPLTSTAT0 {
address 0x096
access_mode RW
modes M_DFF0, M_DFF1
+ count 2
field STAETERM 0x80
field SCBCERR 0x40
field SCADERR 0x20
@@ -895,6 +906,7 @@ register DCHSPLTSTAT1 {
address 0x097
access_mode RW
modes M_DFF0, M_DFF1
+ count 2
field RXDATABUCKET 0x01
}
@@ -1048,6 +1060,7 @@ register SGSPLTSTAT0 {
address 0x09E
access_mode RW
modes M_DFF0, M_DFF1
+ count 2
field STAETERM 0x80
field SCBCERR 0x40
field SCADERR 0x20
@@ -1065,6 +1078,7 @@ register SGSPLTSTAT1 {
address 0x09F
access_mode RW
modes M_DFF0, M_DFF1
+ count 2
field RXDATABUCKET 0x01
}
@@ -1086,6 +1100,7 @@ register DF0PCISTAT {
address 0x0A0
access_mode RW
modes M_CFG
+ count 1
field DPE 0x80
field SSE 0x40
field RMA 0x20
@@ -1184,6 +1199,7 @@ register TARGPCISTAT {
address 0x0A7
access_mode RW
modes M_CFG
+ count 5
field DPE 0x80
field SSE 0x40
field STA 0x08
@@ -1198,6 +1214,7 @@ register LQIN {
address 0x020
access_mode RW
size 20
+ count 2
modes M_DFF0, M_DFF1, M_SCSI
}
@@ -1229,6 +1246,7 @@ register LUNPTR {
address 0x022
access_mode RW
modes M_CFG
+ count 2
}
/*
@@ -1259,6 +1277,7 @@ register CMDLENPTR {
address 0x025
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1270,6 +1289,7 @@ register ATTRPTR {
address 0x026
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1281,6 +1301,7 @@ register FLAGPTR {
address 0x027
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1291,6 +1312,7 @@ register CMDPTR {
address 0x028
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1301,6 +1323,7 @@ register QNEXTPTR {
address 0x029
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1323,6 +1346,7 @@ register ABRTBYTEPTR {
address 0x02B
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1333,6 +1357,7 @@ register ABRTBITPTR {
address 0x02C
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1370,6 +1395,7 @@ register LUNLEN {
address 0x030
access_mode RW
modes M_CFG
+ count 2
mask ILUNLEN 0x0F
mask TLUNLEN 0xF0
}
@@ -1383,6 +1409,7 @@ register CDBLIMIT {
address 0x031
access_mode RW
modes M_CFG
+ count 1
}
/*
@@ -1394,6 +1421,7 @@ register MAXCMD {
address 0x032
access_mode RW
modes M_CFG
+ count 9
}
/*
@@ -1458,6 +1486,7 @@ register LQCTL1 {
address 0x038
access_mode RW
modes M_DFF0, M_DFF1, M_SCSI
+ count 2
field PCI2PCI 0x04
field SINGLECMD 0x02
field ABORTPENDING 0x01
@@ -1470,6 +1499,7 @@ register LQCTL2 {
address 0x039
access_mode RW
modes M_DFF0, M_DFF1, M_SCSI
+ count 5
field LQIRETRY 0x80
field LQICONTINUE 0x40
field LQITOIDLE 0x20
@@ -1528,6 +1558,7 @@ register SCSISEQ1 {
address 0x03B
access_mode RW
modes M_DFF0, M_DFF1, M_SCSI
+ count 8
field MANUALCTL 0x40
field ENSELI 0x20
field ENRSELI 0x10
@@ -1667,6 +1698,9 @@ register SCSISIGO {
}
}
+/*
+ * SCSI Control Signal In
+ */
register SCSISIGI {
address 0x041
access_mode RO
@@ -1703,6 +1737,7 @@ register MULTARGID {
access_mode RW
modes M_CFG
size 2
+ count 2
}
/*
@@ -1758,6 +1793,7 @@ register TARGIDIN {
address 0x048
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 2
field CLKOUT 0x80
field TARGID 0x0F
}
@@ -1798,6 +1834,7 @@ register OPTIONMODE {
address 0x04A
access_mode RW
modes M_CFG
+ count 4
field BIOSCANCTL 0x80
field AUTOACKEN 0x40
field BIASCANCTL 0x20
@@ -1850,6 +1887,7 @@ register SIMODE0 {
address 0x04B
access_mode RW
modes M_CFG
+ count 8
field ENSELDO 0x40
field ENSELDI 0x20
field ENSELINGO 0x10
@@ -1945,6 +1983,7 @@ register PERRDIAG {
address 0x04E
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 3
field HIZERO 0x80
field HIPERR 0x40
field PREVPHASE 0x20
@@ -1962,6 +2001,7 @@ register LQISTATE {
address 0x04E
access_mode RO
modes M_CFG
+ count 6
}
/*
@@ -1971,6 +2011,7 @@ register SOFFCNT {
address 0x04F
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 1
}
/*
@@ -1980,6 +2021,7 @@ register LQOSTATE {
address 0x04F
access_mode RO
modes M_CFG
+ count 2
}
/*
@@ -1989,6 +2031,7 @@ register LQISTAT0 {
address 0x050
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 2
field LQIATNQAS 0x20
field LQICRCT1 0x10
field LQICRCT2 0x08
@@ -2004,6 +2047,7 @@ register CLRLQIINT0 {
address 0x050
access_mode WO
modes M_DFF0, M_DFF1, M_SCSI
+ count 1
field CLRLQIATNQAS 0x20
field CLRLQICRCT1 0x10
field CLRLQICRCT2 0x08
@@ -2019,6 +2063,7 @@ register LQIMODE0 {
address 0x050
access_mode RW
modes M_CFG
+ count 3
field ENLQIATNQASK 0x20
field ENLQICRCT1 0x10
field ENLQICRCT2 0x08
@@ -2034,6 +2079,7 @@ register LQISTAT1 {
address 0x051
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 3
field LQIPHASE_LQ 0x80
field LQIPHASE_NLQ 0x40
field LQIABORT 0x20
@@ -2051,6 +2097,7 @@ register CLRLQIINT1 {
address 0x051
access_mode WO
modes M_DFF0, M_DFF1, M_SCSI
+ count 4
field CLRLQIPHASE_LQ 0x80
field CLRLQIPHASE_NLQ 0x40
field CLRLIQABORT 0x20
@@ -2068,6 +2115,7 @@ register LQIMODE1 {
address 0x051
access_mode RW
modes M_CFG
+ count 4
field ENLQIPHASE_LQ 0x80 /* LQIPHASE1 */
field ENLQIPHASE_NLQ 0x40 /* LQIPHASE2 */
field ENLIQABORT 0x20
@@ -2102,6 +2150,7 @@ register SSTAT3 {
address 0x053
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 3
field NTRAMPERR 0x02
field OSRAMPERR 0x01
}
@@ -2113,6 +2162,7 @@ register CLRSINT3 {
address 0x053
access_mode WO
modes M_DFF0, M_DFF1, M_SCSI
+ count 3
field CLRNTRAMPERR 0x02
field CLROSRAMPERR 0x01
}
@@ -2124,6 +2174,7 @@ register SIMODE3 {
address 0x053
access_mode RW
modes M_CFG
+ count 4
field ENNTRAMPERR 0x02
field ENOSRAMPERR 0x01
}
@@ -2135,6 +2186,7 @@ register LQOSTAT0 {
address 0x054
access_mode RO
modes M_DFF0, M_DFF1, M_SCSI
+ count 2
field LQOTARGSCBPERR 0x10
field LQOSTOPT2 0x08
field LQOATNLQ 0x04
@@ -2149,6 +2201,7 @@ register CLRLQOINT0 {
address 0x054
access_mode WO
modes M_DFF0, M_DFF1, M_SCSI
+ count 3
field CLRLQOTARGSCBPERR 0x10
field CLRLQOSTOPT2 0x08
field CLRLQOATNLQ 0x04
@@ -2163,6 +2216,7 @@ register LQOMODE0 {
address 0x054
access_mode RW
modes M_CFG
+ count 4
field ENLQOTARGSCBPERR 0x10
field ENLQOSTOPT2 0x08
field ENLQOATNLQ 0x04
@@ -2191,6 +2245,7 @@ register CLRLQOINT1 {
address 0x055
access_mode WO
modes M_DFF0, M_DFF1, M_SCSI
+ count 7
field CLRLQOINITSCBPERR 0x10
field CLRLQOSTOPI2 0x08
field CLRLQOBADQAS 0x04
@@ -2205,6 +2260,7 @@ register LQOMODE1 {
address 0x055
access_mode RW
modes M_CFG
+ count 4
field ENLQOINITSCBPERR 0x10
field ENLQOSTOPI2 0x08
field ENLQOBADQAS 0x04
@@ -2232,6 +2288,7 @@ register OS_SPACE_CNT {
address 0x056
access_mode RO
modes M_CFG
+ count 2
}
/*
@@ -2286,13 +2343,19 @@ register NEXTSCB {
modes M_SCSI
}
-/* Rev B only. */
+/*
+ * LQO SCSI Control
+ * (Rev B only.)
+ */
register LQOSCSCTL {
address 0x05A
access_mode RW
size 1
modes M_CFG
+ count 1
field LQOH2A_VERSION 0x80
+ field LQOBUSETDLY 0x40
+ field LQONOHOLDLACK 0x02
field LQONOCHKOVER 0x01
}
@@ -2459,6 +2522,7 @@ register NEGPERIOD {
address 0x061
access_mode RW
modes M_SCSI
+ count 1
}
/*
@@ -2478,6 +2542,7 @@ register NEGOFFSET {
address 0x062
access_mode RW
modes M_SCSI
+ count 1
}
/*
@@ -2487,6 +2552,7 @@ register NEGPPROPTS {
address 0x063
access_mode RW
modes M_SCSI
+ count 1
field PPROPT_PACE 0x08
field PPROPT_QAS 0x04
field PPROPT_DT 0x02
@@ -2516,12 +2582,19 @@ register ANNEXCOL {
address 0x065
access_mode RW
modes M_SCSI
+ count 7
}
+/*
+ * SCSI Check
+ * (Rev. B only)
+ */
register SCSCHKN {
address 0x066
access_mode RW
modes M_CFG
+ count 1
+ field BIDICHKDIS 0x80
field STSELSKIDDIS 0x40
field CURRFIFODEF 0x20
field WIDERESEN 0x10
@@ -2561,6 +2634,7 @@ register ANNEXDAT {
address 0x066
access_mode RW
modes M_SCSI
+ count 3
}
/*
@@ -2596,6 +2670,7 @@ register TOWNID {
address 0x069
access_mode RW
modes M_SCSI
+ count 2
}
/*
@@ -2737,6 +2812,7 @@ register SCBAUTOPTR {
address 0x0AB
access_mode RW
modes M_CFG
+ count 1
field AUSCBPTR_EN 0x80
field SCBPTR_ADDR 0x38
field SCBPTR_OFF 0x07
@@ -2881,6 +2957,7 @@ register BRDDAT {
address 0x0B8
access_mode RW
modes M_SCSI
+ count 2
}
/*
@@ -2890,6 +2967,7 @@ register BRDCTL {
address 0x0B9
access_mode RW
modes M_SCSI
+ count 7
field FLXARBACK 0x80
field FLXARBREQ 0x40
field BRDADDR 0x38
@@ -2905,6 +2983,7 @@ register SEEADR {
address 0x0BA
access_mode RW
modes M_SCSI
+ count 4
}
/*
@@ -2915,6 +2994,7 @@ register SEEDAT {
access_mode RW
size 2
modes M_SCSI
+ count 4
}
/*
@@ -2924,6 +3004,7 @@ register SEESTAT {
address 0x0BE
access_mode RO
modes M_SCSI
+ count 1
field INIT_DONE 0x80
field SEEOPCODE 0x70
field LDALTID_L 0x08
@@ -2939,6 +3020,7 @@ register SEECTL {
address 0x0BE
access_mode RW
modes M_SCSI
+ count 4
field SEEOPCODE 0x70 {
SEEOP_ERASE 0x70,
SEEOP_READ 0x60,
@@ -3000,6 +3082,7 @@ register DSPDATACTL {
address 0x0C1
access_mode RW
modes M_CFG
+ count 3
field BYPASSENAB 0x80
field DESQDIS 0x10
field RCVROFFSTDIS 0x04
@@ -3058,6 +3141,7 @@ register DSPSELECT {
address 0x0C4
access_mode RW
modes M_CFG
+ count 1
field AUTOINCEN 0x80
field DSPSEL 0x1F
}
@@ -3071,6 +3155,7 @@ register WRTBIASCTL {
address 0x0C5
access_mode WO
modes M_CFG
+ count 3
field AUTOXBCDIS 0x80
field XMITMANVAL 0x3F
}
@@ -3196,7 +3281,8 @@ register OVLYADDR {
*/
register SEQCTL0 {
address 0x0D6
- access_mode RW
+ access_mode RW
+ count 11
field PERRORDIS 0x80
field PAUSEDIS 0x40
field FAILDIS 0x20
@@ -3226,7 +3312,8 @@ register SEQCTL1 {
*/
register FLAGS {
address 0x0D8
- access_mode RO
+ access_mode RO
+ count 23
field ZERO 0x02
field CARRY 0x01
}
@@ -3255,7 +3342,8 @@ register SEQINTCTL {
*/
register SEQRAM {
address 0x0DA
- access_mode RW
+ access_mode RW
+ count 2
}
/*
@@ -3266,6 +3354,7 @@ register PRGMCNT {
address 0x0DE
access_mode RW
size 2
+ count 5
}
/*
@@ -3273,7 +3362,7 @@ register PRGMCNT {
*/
register ACCUM {
address 0x0E0
- access_mode RW
+ access_mode RW
accumulator
}
@@ -3401,6 +3490,7 @@ register INTVEC1_ADDR {
access_mode RW
size 2
modes M_CFG
+ count 1
}
/*
@@ -3412,6 +3502,7 @@ register CURADDR {
access_mode RW
size 2
modes M_SCSI
+ count 2
}
/*
@@ -3423,6 +3514,7 @@ register INTVEC2_ADDR {
access_mode RW
size 2
modes M_CFG
+ count 1
}
/*
@@ -3579,6 +3671,7 @@ scratch_ram {
/* Parameters for DMA Logic */
DMAPARAMS {
size 1
+ count 8
field PRELOADEN 0x80
field WIDEODD 0x40
field SCSIEN 0x20
@@ -3648,9 +3741,11 @@ scratch_ram {
*/
KERNEL_TQINPOS {
size 1
+ count 1
}
- TQINPOS {
+ TQINPOS {
size 1
+ count 8
}
/*
* Base address of our shared data with the kernel driver in host
@@ -3681,6 +3776,7 @@ scratch_ram {
}
ARG_2 {
size 1
+ count 1
alias RETURN_2
}
@@ -3698,6 +3794,7 @@ scratch_ram {
*/
SCSISEQ_TEMPLATE {
size 1
+ count 7
field MANUALCTL 0x40
field ENSELI 0x20
field ENRSELI 0x10
@@ -3711,6 +3808,7 @@ scratch_ram {
*/
INITIATOR_TAG {
size 1
+ count 1
}
SEQ_FLAGS2 {
@@ -3777,6 +3875,7 @@ scratch_ram {
*/
CMDSIZE_TABLE {
size 8
+ count 8
}
/*
* When an SCB with the MK_MESSAGE flag is
@@ -3803,8 +3902,8 @@ scratch_ram {
/************************* Hardware SCB Definition ****************************/
scb {
address 0x180
- size 64
- modes 0, 1, 2, 3
+ size 64
+ modes 0, 1, 2, 3
SCB_RESIDUAL_DATACNT {
size 4
alias SCB_CDB_STORE
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index ade0fb8fbdb..55508b0fcec 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -52,7 +52,7 @@
/***************************** Lookup Tables **********************************/
-static char *ahd_chip_names[] =
+static const char *const ahd_chip_names[] =
{
"NONE",
"aic7901",
@@ -66,10 +66,10 @@ static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
*/
struct ahd_hard_error_entry {
uint8_t errno;
- char *errmesg;
+ const char *errmesg;
};
-static struct ahd_hard_error_entry ahd_hard_errors[] = {
+static const struct ahd_hard_error_entry ahd_hard_errors[] = {
{ DSCTMOUT, "Discard Timer has timed out" },
{ ILLOPCODE, "Illegal Opcode in sequencer program" },
{ SQPARERR, "Sequencer Parity Error" },
@@ -79,7 +79,7 @@ static struct ahd_hard_error_entry ahd_hard_errors[] = {
};
static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors);
-static struct ahd_phase_table_entry ahd_phase_table[] =
+static const struct ahd_phase_table_entry ahd_phase_table[] =
{
{ P_DATAOUT, MSG_NOOP, "in Data-out phase" },
{ P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
@@ -213,7 +213,7 @@ static void ahd_dumpseq(struct ahd_softc *ahd);
#endif
static void ahd_loadseq(struct ahd_softc *ahd);
static int ahd_check_patch(struct ahd_softc *ahd,
- struct patch **start_patch,
+ const struct patch **start_patch,
u_int start_instr, u_int *skip_addr);
static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd,
u_int address);
@@ -254,7 +254,7 @@ static void ahd_freeze_devq(struct ahd_softc *ahd,
struct scb *scb);
static void ahd_handle_scb_status(struct ahd_softc *ahd,
struct scb *scb);
-static struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase);
+static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase);
static void ahd_shutdown(void *arg);
static void ahd_update_coalescing_values(struct ahd_softc *ahd,
u_int timer,
@@ -266,8 +266,774 @@ static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
int target, char channel, int lun,
u_int tag, role_t role);
-/******************************** Private Inlines *****************************/
+static void ahd_reset_cmds_pending(struct ahd_softc *ahd);
+
+/*************************** Interrupt Services *******************************/
+static void ahd_run_qoutfifo(struct ahd_softc *ahd);
+#ifdef AHD_TARGET_MODE
+static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
+#endif
+static void ahd_handle_hwerrint(struct ahd_softc *ahd);
+static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
+static void ahd_handle_scsiint(struct ahd_softc *ahd,
+ u_int intstat);
+
+/************************ Sequencer Execution Control *************************/
+void
+ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
+{
+ if (ahd->src_mode == src && ahd->dst_mode == dst)
+ return;
+#ifdef AHD_DEBUG
+ if (ahd->src_mode == AHD_MODE_UNKNOWN
+ || ahd->dst_mode == AHD_MODE_UNKNOWN)
+ panic("Setting mode prior to saving it.\n");
+ if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
+ printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
+ ahd_build_mode_state(ahd, src, dst));
+#endif
+ ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
+ ahd->src_mode = src;
+ ahd->dst_mode = dst;
+}
+
+static void
+ahd_update_modes(struct ahd_softc *ahd)
+{
+ ahd_mode_state mode_ptr;
+ ahd_mode src;
+ ahd_mode dst;
+
+ mode_ptr = ahd_inb(ahd, MODE_PTR);
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
+ printf("Reading mode 0x%x\n", mode_ptr);
+#endif
+ ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
+ ahd_known_modes(ahd, src, dst);
+}
+
+static void
+ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
+ ahd_mode dstmode, const char *file, int line)
+{
+#ifdef AHD_DEBUG
+ if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
+ || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
+ panic("%s:%s:%d: Mode assertion failed.\n",
+ ahd_name(ahd), file, line);
+ }
+#endif
+}
+
+#define AHD_ASSERT_MODES(ahd, source, dest) \
+ ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
+
+ahd_mode_state
+ahd_save_modes(struct ahd_softc *ahd)
+{
+ if (ahd->src_mode == AHD_MODE_UNKNOWN
+ || ahd->dst_mode == AHD_MODE_UNKNOWN)
+ ahd_update_modes(ahd);
+
+ return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
+}
+
+void
+ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
+{
+ ahd_mode src;
+ ahd_mode dst;
+
+ ahd_extract_mode_state(ahd, state, &src, &dst);
+ ahd_set_modes(ahd, src, dst);
+}
+
+/*
+ * Determine whether the sequencer has halted code execution.
+ * Returns non-zero status if the sequencer is stopped.
+ */
+int
+ahd_is_paused(struct ahd_softc *ahd)
+{
+ return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
+}
+
+/*
+ * Request that the sequencer stop and wait, indefinitely, for it
+ * to stop. The sequencer will only acknowledge that it is paused
+ * once it has reached an instruction boundary and PAUSEDIS is
+ * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
+ * for critical sections.
+ */
+void
+ahd_pause(struct ahd_softc *ahd)
+{
+ ahd_outb(ahd, HCNTRL, ahd->pause);
+
+ /*
+ * Since the sequencer can disable pausing in a critical section, we
+ * must loop until it actually stops.
+ */
+ while (ahd_is_paused(ahd) == 0)
+ ;
+}
+
+/*
+ * Allow the sequencer to continue program execution.
+ * We check here to ensure that no additional interrupt
+ * sources that would cause the sequencer to halt have been
+ * asserted. If, for example, a SCSI bus reset is detected
+ * while we are fielding a different, pausing, interrupt type,
+ * we don't want to release the sequencer before going back
+ * into our interrupt handler and dealing with this new
+ * condition.
+ */
+void
+ahd_unpause(struct ahd_softc *ahd)
+{
+ /*
+ * Automatically restore our modes to those saved
+ * prior to the first change of the mode.
+ */
+ if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
+ && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
+ if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
+ ahd_reset_cmds_pending(ahd);
+ ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
+ }
+
+ if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
+ ahd_outb(ahd, HCNTRL, ahd->unpause);
+
+ ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
+}
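As a usage sketch (not part of this patch): callers bracket direct register access with these helpers. A minimal, hypothetical example follows; AHD_MODE_SCSI and the register pokes are illustrative assumptions.

	/*
	 * Illustration only: stop the sequencer, save and set the mode
	 * pointer, touch mode-dependent registers, then resume.
	 */
	static void
	example_poke_registers(struct ahd_softc *ahd)
	{
		ahd_mode_state saved_modes;

		ahd_pause(ahd);		/* spins until PAUSE is acknowledged */
		saved_modes = ahd_save_modes(ahd);
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		/* ... access M_SCSI-mode registers here ... */
		ahd_restore_modes(ahd, saved_modes);
		ahd_unpause(ahd);	/* releases HCNTRL if no interrupt pends */
	}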
+
+/*********************** Scatter Gather List Handling *************************/
+void *
+ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+ void *sgptr, dma_addr_t addr, bus_size_t len, int last)
+{
+ scb->sg_count++;
+ if (sizeof(dma_addr_t) > 4
+ && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = (struct ahd_dma64_seg *)sgptr;
+ sg->addr = ahd_htole64(addr);
+ sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
+ return (sg + 1);
+ } else {
+ struct ahd_dma_seg *sg;
+ sg = (struct ahd_dma_seg *)sgptr;
+ sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
+ sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
+ | (last ? AHD_DMA_LAST_SEG : 0));
+ return (sg + 1);
+ }
+}
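A hedged sketch of how a caller chains segments through ahd_sg_setup(); the two address/length pairs are placeholders, and the ahd_setup_data_scb() routine that follows is what later consumes the finished list.

	/*
	 * Illustration only: build a two-segment list in scb->sg_list.
	 * ahd_sg_setup() bumps scb->sg_count and returns the next slot.
	 */
	static void
	example_build_sglist(struct ahd_softc *ahd, struct scb *scb,
			     dma_addr_t addr0, bus_size_t len0,
			     dma_addr_t addr1, bus_size_t len1)
	{
		void *sg = scb->sg_list;

		scb->sg_count = 0;
		sg = ahd_sg_setup(ahd, scb, sg, addr0, len0, /*last*/0);
		sg = ahd_sg_setup(ahd, scb, sg, addr1, len1, /*last*/1);
	}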
+
+static void
+ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
+{
+ /* XXX Handle target mode SCBs. */
+ scb->crc_retry_count = 0;
+ if ((scb->flags & SCB_PACKETIZED) != 0) {
+ /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
+ scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
+ } else {
+ if (ahd_get_transfer_length(scb) & 0x01)
+ scb->hscb->task_attribute = SCB_XFERLEN_ODD;
+ else
+ scb->hscb->task_attribute = 0;
+ }
+
+ if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
+ || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
+ scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
+ ahd_htole32(scb->sense_busaddr);
+}
+
+static void
+ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ /*
+	 * Copy the first SG into the "current" data pointer area.
+ */
+ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+ struct ahd_dma64_seg *sg;
+
+ sg = (struct ahd_dma64_seg *)scb->sg_list;
+ scb->hscb->dataptr = sg->addr;
+ scb->hscb->datacnt = sg->len;
+ } else {
+ struct ahd_dma_seg *sg;
+ uint32_t *dataptr_words;
+
+ sg = (struct ahd_dma_seg *)scb->sg_list;
+ dataptr_words = (uint32_t*)&scb->hscb->dataptr;
+ dataptr_words[0] = sg->addr;
+ dataptr_words[1] = 0;
+ if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
+ uint64_t high_addr;
+
+ high_addr = ahd_le32toh(sg->len) & 0x7F000000;
+ scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
+ }
+ scb->hscb->datacnt = sg->len;
+ }
+ /*
+ * Note where to find the SG entries in bus space.
+ * We also set the full residual flag which the
+ * sequencer will clear as soon as a data transfer
+ * occurs.
+ */
+ scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
+}
+
+static void
+ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
+ scb->hscb->dataptr = 0;
+ scb->hscb->datacnt = 0;
+}
+
+/************************** Memory mapping routines ***************************/
+static void *
+ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
+{
+ dma_addr_t sg_offset;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
+ return ((uint8_t *)scb->sg_list + sg_offset);
+}
+
+static uint32_t
+ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
+{
+ dma_addr_t sg_offset;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
+ - ahd_sg_size(ahd);
+
+ return (scb->sg_list_busaddr + sg_offset);
+}
+
+static void
+ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
+ scb->hscb_map->dmamap,
+ /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
+ /*len*/sizeof(*scb->hscb), op);
+}
+
+void
+ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ if (scb->sg_count == 0)
+ return;
+
+ ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
+ scb->sg_map->dmamap,
+ /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
+ /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
+}
+
+static void
+ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
+ scb->sense_map->dmamap,
+ /*offset*/scb->sense_busaddr,
+ /*len*/AHD_SENSE_BUFSIZE, op);
+}
+
+#ifdef AHD_TARGET_MODE
+static uint32_t
+ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
+{
+ return (((uint8_t *)&ahd->targetcmds[index])
+ - (uint8_t *)ahd->qoutfifo);
+}
+#endif
+
+/*********************** Miscellaneous Support Functions **********************/
+/*
+ * Return pointers to the transfer negotiation information
+ * for the specified our_id/remote_id pair.
+ */
+struct ahd_initiator_tinfo *
+ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
+ u_int remote_id, struct ahd_tmode_tstate **tstate)
+{
+ /*
+ * Transfer data structures are stored from the perspective
+ * of the target role. Since the parameters for a connection
+ * in the initiator role to a given target are the same as
+ * when the roles are reversed, we pretend we are the target.
+ */
+ if (channel == 'B')
+ our_id += 8;
+ *tstate = ahd->enabled_targets[our_id];
+ return (&(*tstate)->transinfo[remote_id]);
+}
+
+uint16_t
+ahd_inw(struct ahd_softc *ahd, u_int port)
+{
+ /*
+ * Read high byte first as some registers increment
+ * or have other side effects when the low byte is
+ * read.
+ */
+ uint16_t r = ahd_inb(ahd, port+1) << 8;
+ return r | ahd_inb(ahd, port);
+}
+
+void
+ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
+{
+ /*
+	 * Write low byte first to accommodate registers
+	 * such as PRGMCNT where the order matters.
+ */
+ ahd_outb(ahd, port, value & 0xFF);
+ ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
+}
+
+uint32_t
+ahd_inl(struct ahd_softc *ahd, u_int port)
+{
+ return ((ahd_inb(ahd, port))
+ | (ahd_inb(ahd, port+1) << 8)
+ | (ahd_inb(ahd, port+2) << 16)
+ | (ahd_inb(ahd, port+3) << 24));
+}
+
+void
+ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
+{
+ ahd_outb(ahd, port, (value) & 0xFF);
+ ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
+ ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
+ ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
+}
+
+uint64_t
+ahd_inq(struct ahd_softc *ahd, u_int port)
+{
+ return ((ahd_inb(ahd, port))
+ | (ahd_inb(ahd, port+1) << 8)
+ | (ahd_inb(ahd, port+2) << 16)
+ | (ahd_inb(ahd, port+3) << 24)
+ | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
+ | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
+ | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
+ | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
+}
+
+void
+ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
+{
+ ahd_outb(ahd, port, value & 0xFF);
+ ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
+ ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
+ ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
+ ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
+ ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
+ ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
+ ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
+}
+
+u_int
+ahd_get_scbptr(struct ahd_softc *ahd)
+{
+ AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+ ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+ return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
+}
+
+void
+ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
+{
+ AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+ ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+ ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
+ ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_hnscb_qoff(struct ahd_softc *ahd)
+{
+ return (ahd_inw_atomic(ahd, HNSCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ ahd_outw_atomic(ahd, HNSCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_hescb_qoff(struct ahd_softc *ahd)
+{
+ return (ahd_inb(ahd, HESCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ ahd_outb(ahd, HESCB_QOFF, value);
+}
+
+static u_int
+ahd_get_snscb_qoff(struct ahd_softc *ahd)
+{
+ u_int oldvalue;
+
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ oldvalue = ahd_inw(ahd, SNSCB_QOFF);
+ ahd_outw(ahd, SNSCB_QOFF, oldvalue);
+ return (oldvalue);
+}
+
+static void
+ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ ahd_outw(ahd, SNSCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_sescb_qoff(struct ahd_softc *ahd)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ return (ahd_inb(ahd, SESCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ ahd_outb(ahd, SESCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_sdscb_qoff(struct ahd_softc *ahd)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
+}
+#endif
+
+static void
+ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+ AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+ ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
+ ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
+}
+
+u_int
+ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ u_int value;
+
+ /*
+ * Workaround PCI-X Rev A. hardware bug.
+ * After a host read of SCB memory, the chip
+ * may become confused into thinking prefetch
+ * was required. This starts the discard timer
+ * running and can cause an unexpected discard
+	 * timer interrupt. The workaround is to read
+ * a normal register prior to the exhaustion of
+ * the discard timer. The mode pointer register
+ * has no side effects and so serves well for
+ * this purpose.
+ *
+ * Razor #528
+ */
+ value = ahd_inb(ahd, offset);
+ if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
+ ahd_inb(ahd, MODE_PTR);
+ return (value);
+}
+
+u_int
+ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ return (ahd_inb_scbram(ahd, offset)
+ | (ahd_inb_scbram(ahd, offset+1) << 8));
+}
+
+static uint32_t
+ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ return (ahd_inw_scbram(ahd, offset)
+ | (ahd_inw_scbram(ahd, offset+2) << 16));
+}
+
+static uint64_t
+ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
+{
+ return (ahd_inl_scbram(ahd, offset)
+ | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
+}
+
+struct scb *
+ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
+{
+ struct scb* scb;
+
+ if (tag >= AHD_SCB_MAX)
+ return (NULL);
+ scb = ahd->scb_data.scbindex[tag];
+ if (scb != NULL)
+ ahd_sync_scb(ahd, scb,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ return (scb);
+}
+
+static void
+ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
+{
+ struct hardware_scb *q_hscb;
+ struct map_node *q_hscb_map;
+ uint32_t saved_hscb_busaddr;
+
+ /*
+ * Our queuing method is a bit tricky. The card
+ * knows in advance which HSCB (by address) to download,
+ * and we can't disappoint it. To achieve this, the next
+ * HSCB to download is saved off in ahd->next_queued_hscb.
+ * When we are called to queue "an arbitrary scb",
+ * we copy the contents of the incoming HSCB to the one
+ * the sequencer knows about, swap HSCB pointers and
+ * finally assign the SCB to the tag indexed location
+ * in the scb_array. This makes sure that we can still
+ * locate the correct SCB by SCB_TAG.
+ */
+ q_hscb = ahd->next_queued_hscb;
+ q_hscb_map = ahd->next_queued_hscb_map;
+ saved_hscb_busaddr = q_hscb->hscb_busaddr;
+ memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
+ q_hscb->hscb_busaddr = saved_hscb_busaddr;
+ q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
+
+ /* Now swap HSCB pointers. */
+ ahd->next_queued_hscb = scb->hscb;
+ ahd->next_queued_hscb_map = scb->hscb_map;
+ scb->hscb = q_hscb;
+ scb->hscb_map = q_hscb_map;
+
+ /* Now define the mapping from tag to SCB in the scbindex */
+ ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
+}
+
+/*
+ * Tell the sequencer about a new transaction to execute.
+ */
+void
+ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+ ahd_swap_with_next_hscb(ahd, scb);
+
+ if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
+ panic("Attempt to queue invalid SCB tag %x\n",
+ SCB_GET_TAG(scb));
+
+ /*
+ * Keep a history of SCBs we've downloaded in the qinfifo.
+ */
+ ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
+ ahd->qinfifonext++;
+
+ if (scb->sg_count != 0)
+ ahd_setup_data_scb(ahd, scb);
+ else
+ ahd_setup_noxfer_scb(ahd, scb);
+ ahd_setup_scb_common(ahd, scb);
+
+ /*
+ * Make sure our data is consistent from the
+ * perspective of the adapter.
+ */
+ ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+#ifdef AHD_DEBUG
+ if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
+ uint64_t host_dataptr;
+
+ host_dataptr = ahd_le64toh(scb->hscb->dataptr);
+ printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
+ ahd_name(ahd),
+ SCB_GET_TAG(scb), scb->hscb->scsiid,
+ ahd_le32toh(scb->hscb->hscb_busaddr),
+ (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
+ (u_int)(host_dataptr & 0xFFFFFFFF),
+ ahd_le32toh(scb->hscb->datacnt));
+ }
+#endif
+ /* Tell the adapter about the newly queued SCB */
+ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+}
+
+/************************** Interrupt Processing ******************************/
+static void
+ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
+{
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
+ /*offset*/0,
+ /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
+}
+
+static void
+ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
+{
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0) {
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+ ahd->shared_data_map.dmamap,
+ ahd_targetcmd_offset(ahd, 0),
+ sizeof(struct target_cmd) * AHD_TMODE_CMDS,
+ op);
+ }
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHD_RUN_QOUTFIFO 0x1
+#define AHD_RUN_TQINFIFO 0x2
+static u_int
+ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
+{
+ u_int retval;
+
+ retval = 0;
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
+ /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
+ /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
+ if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
+ == ahd->qoutfifonext_valid_tag)
+ retval |= AHD_RUN_QOUTFIFO;
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0
+ && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
+ ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+ ahd->shared_data_map.dmamap,
+				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
+ /*len*/sizeof(struct target_cmd),
+ BUS_DMASYNC_POSTREAD);
+ if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
+ retval |= AHD_RUN_TQINFIFO;
+ }
+#endif
+ return (retval);
+}
+
+/*
+ * Catch an interrupt from the adapter
+ */
+int
+ahd_intr(struct ahd_softc *ahd)
+{
+ u_int intstat;
+
+ if ((ahd->pause & INTEN) == 0) {
+ /*
+ * Our interrupt is not enabled on the chip
+ * and may be disabled for re-entrancy reasons,
+ * so just return. This is likely just a shared
+ * interrupt.
+ */
+ return (0);
+ }
+
+ /*
+ * Instead of directly reading the interrupt status register,
+ * infer the cause of the interrupt by checking our in-core
+ * completion queues. This avoids a costly PCI bus read in
+ * most cases.
+ */
+ if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
+ && (ahd_check_cmdcmpltqueues(ahd) != 0))
+ intstat = CMDCMPLT;
+ else
+ intstat = ahd_inb(ahd, INTSTAT);
+
+ if ((intstat & INT_PEND) == 0)
+ return (0);
+
+ if (intstat & CMDCMPLT) {
+ ahd_outb(ahd, CLRINT, CLRCMDINT);
+
+ /*
+ * Ensure that the chip sees that we've cleared
+ * this interrupt before we walk the output fifo.
+ * Otherwise, we may, due to posted bus writes,
+ * clear the interrupt after we finish the scan,
+ * and after the sequencer has added new entries
+ * and asserted the interrupt again.
+ */
+ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+ if (ahd_is_paused(ahd)) {
+ /*
+ * Potentially lost SEQINT.
+ * If SEQINTCODE is non-zero,
+ * simulate the SEQINT.
+ */
+ if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
+ intstat |= SEQINT;
+ }
+ } else {
+ ahd_flush_device_writes(ahd);
+ }
+ ahd_run_qoutfifo(ahd);
+ ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
+ ahd->cmdcmplt_total++;
+#ifdef AHD_TARGET_MODE
+ if ((ahd->flags & AHD_TARGETROLE) != 0)
+ ahd_run_tqinfifo(ahd, /*paused*/FALSE);
+#endif
+ }
+
+ /*
+ * Handle statuses that may invalidate our cached
+ * copy of INTSTAT separately.
+ */
+ if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
+ /* Hot eject. Do nothing */
+ } else if (intstat & HWERRINT) {
+ ahd_handle_hwerrint(ahd);
+ } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
+ ahd->bus_intr(ahd);
+ } else {
+
+ if ((intstat & SEQINT) != 0)
+ ahd_handle_seqint(ahd, intstat);
+
+ if ((intstat & SCSIINT) != 0)
+ ahd_handle_scsiint(ahd, intstat);
+ }
+ return (1);
+}
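Since ahd_intr() returns 0 for interrupts that are not ours, a Linux shared-IRQ wrapper can translate that result into IRQ_NONE. A minimal sketch, with an invented wrapper name and locking omitted (the driver's real ISR is not shown in these hunks):

	/*
	 * Illustration only: mapping ahd_intr()'s 0/1 result onto the
	 * Linux shared-interrupt contract.
	 */
	static irqreturn_t
	example_isr(int irq, void *dev_id)
	{
		struct ahd_softc *ahd = dev_id;

		return ahd_intr(ahd) ? IRQ_HANDLED : IRQ_NONE;
	}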
+
+/******************************** Private Inlines *****************************/
static __inline void
ahd_assert_atn(struct ahd_softc *ahd)
{
@@ -280,7 +1046,7 @@ ahd_assert_atn(struct ahd_softc *ahd)
* are currently in a packetized transfer. We could
* just as easily be sending or receiving a message.
*/
-static __inline int
+static int
ahd_currently_packetized(struct ahd_softc *ahd)
{
ahd_mode_state saved_modes;
@@ -896,7 +1662,7 @@ clrchn:
* a copy of the first byte (little endian) of the sgptr
* hscb field.
*/
-void
+static void
ahd_run_qoutfifo(struct ahd_softc *ahd)
{
struct ahd_completion *completion;
@@ -935,7 +1701,7 @@ ahd_run_qoutfifo(struct ahd_softc *ahd)
}
/************************* Interrupt Handling *********************************/
-void
+static void
ahd_handle_hwerrint(struct ahd_softc *ahd)
{
/*
@@ -1009,7 +1775,7 @@ ahd_dump_sglist(struct scb *scb)
}
#endif /* AHD_DEBUG */
-void
+static void
ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
{
u_int seqintcode;
@@ -1621,7 +2387,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
ahd_unpause(ahd);
}
-void
+static void
ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
{
struct scb *scb;
@@ -3571,11 +4337,11 @@ ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
devinfo->target, devinfo->lun);
}
-static struct ahd_phase_table_entry*
+static const struct ahd_phase_table_entry*
ahd_lookup_phase_entry(int phase)
{
- struct ahd_phase_table_entry *entry;
- struct ahd_phase_table_entry *last_entry;
+ const struct ahd_phase_table_entry *entry;
+ const struct ahd_phase_table_entry *last_entry;
/*
* num_phases doesn't include the default entry which
@@ -3941,7 +4707,7 @@ ahd_clear_msg_state(struct ahd_softc *ahd)
*/
static void
ahd_handle_message_phase(struct ahd_softc *ahd)
-{
+{
struct ahd_devinfo devinfo;
u_int bus_phase;
int end_session;
@@ -5983,8 +6749,7 @@ found:
*/
void
ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
-{
-
+{
/* Clean up for the next user */
scb->flags = SCB_FLAG_NONE;
scb->hscb->control = 0;
@@ -6272,6 +7037,24 @@ static const char *termstat_strings[] = {
"Not Configured"
};
+/***************************** Timer Facilities *******************************/
+#define ahd_timer_init init_timer
+#define ahd_timer_stop del_timer_sync
+typedef void ahd_linux_callback_t (u_long);
+
+static void
+ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
+{
+ struct ahd_softc *ahd;
+
+ ahd = (struct ahd_softc *)arg;
+ del_timer(timer);
+ timer->data = (u_long)arg;
+ timer->expires = jiffies + (usec * HZ)/1000000;
+ timer->function = (ahd_linux_callback_t*)func;
+ add_timer(timer);
+}
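A hedged usage sketch for the wrapper above: the stat_timer field and ahd_stat_timer callback are stand-ins for whichever ahd_timer_t/ahd_callback_t pair the caller actually owns.

	/* Illustration only: fire a callback roughly 30 seconds from now. */
	static void
	example_arm_timer(struct ahd_softc *ahd)
	{
		/* 30 * 1000000 usec; ahd_timer_reset() converts to jiffies */
		ahd_timer_reset(&ahd->stat_timer, 30 * 1000000,
				ahd_stat_timer, ahd);
	}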
+
/*
* Start the board, ready for normal operation
*/
@@ -7370,7 +8153,7 @@ ahd_qinfifo_count(struct ahd_softc *ahd)
+ ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos);
}
-void
+static void
ahd_reset_cmds_pending(struct ahd_softc *ahd)
{
struct scb *scb;
@@ -8571,7 +9354,7 @@ ahd_loadseq(struct ahd_softc *ahd)
struct cs cs_table[num_critical_sections];
u_int begin_set[num_critical_sections];
u_int end_set[num_critical_sections];
- struct patch *cur_patch;
+ const struct patch *cur_patch;
u_int cs_count;
u_int cur_cs;
u_int i;
@@ -8726,11 +9509,11 @@ ahd_loadseq(struct ahd_softc *ahd)
}
static int
-ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
+ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch,
u_int start_instr, u_int *skip_addr)
{
- struct patch *cur_patch;
- struct patch *last_patch;
+ const struct patch *cur_patch;
+ const struct patch *last_patch;
u_int num_patches;
num_patches = ARRAY_SIZE(patches);
@@ -8764,7 +9547,7 @@ ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
static u_int
ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
{
- struct patch *cur_patch;
+ const struct patch *cur_patch;
int address_offset;
u_int skip_addr;
u_int i;
@@ -8895,7 +9678,7 @@ sized:
}
int
-ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
+ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
const char *name, u_int address, u_int value,
u_int *cur_column, u_int wrap_point)
{
@@ -9886,7 +10669,7 @@ ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
#endif
}
-void
+static void
ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
{
struct target_cmd *cmd;
diff --git a/drivers/scsi/aic7xxx/aic79xx_inline.h b/drivers/scsi/aic7xxx/aic79xx_inline.h
index 45e55575a0f..5f12cf9d99d 100644
--- a/drivers/scsi/aic7xxx/aic79xx_inline.h
+++ b/drivers/scsi/aic7xxx/aic79xx_inline.h
@@ -63,18 +63,15 @@ static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
static __inline void ahd_extract_mode_state(struct ahd_softc *ahd,
ahd_mode_state state,
ahd_mode *src, ahd_mode *dst);
-static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
- ahd_mode dst);
-static __inline void ahd_update_modes(struct ahd_softc *ahd);
-static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
- ahd_mode dstmode, const char *file,
- int line);
-static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
-static __inline void ahd_restore_modes(struct ahd_softc *ahd,
- ahd_mode_state state);
-static __inline int ahd_is_paused(struct ahd_softc *ahd);
-static __inline void ahd_pause(struct ahd_softc *ahd);
-static __inline void ahd_unpause(struct ahd_softc *ahd);
+
+void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
+ ahd_mode dst);
+ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
+void ahd_restore_modes(struct ahd_softc *ahd,
+ ahd_mode_state state);
+int ahd_is_paused(struct ahd_softc *ahd);
+void ahd_pause(struct ahd_softc *ahd);
+void ahd_unpause(struct ahd_softc *ahd);
static __inline void
ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
@@ -99,256 +96,16 @@ ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
*dst = (state & DST_MODE) >> DST_MODE_SHIFT;
}
-static __inline void
-ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
-{
- if (ahd->src_mode == src && ahd->dst_mode == dst)
- return;
-#ifdef AHD_DEBUG
- if (ahd->src_mode == AHD_MODE_UNKNOWN
- || ahd->dst_mode == AHD_MODE_UNKNOWN)
- panic("Setting mode prior to saving it.\n");
- if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
- printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
- ahd_build_mode_state(ahd, src, dst));
-#endif
- ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
- ahd->src_mode = src;
- ahd->dst_mode = dst;
-}
-
-static __inline void
-ahd_update_modes(struct ahd_softc *ahd)
-{
- ahd_mode_state mode_ptr;
- ahd_mode src;
- ahd_mode dst;
-
- mode_ptr = ahd_inb(ahd, MODE_PTR);
-#ifdef AHD_DEBUG
- if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
- printf("Reading mode 0x%x\n", mode_ptr);
-#endif
- ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
- ahd_known_modes(ahd, src, dst);
-}
-
-static __inline void
-ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
- ahd_mode dstmode, const char *file, int line)
-{
-#ifdef AHD_DEBUG
- if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
- || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
- panic("%s:%s:%d: Mode assertion failed.\n",
- ahd_name(ahd), file, line);
- }
-#endif
-}
-
-static __inline ahd_mode_state
-ahd_save_modes(struct ahd_softc *ahd)
-{
- if (ahd->src_mode == AHD_MODE_UNKNOWN
- || ahd->dst_mode == AHD_MODE_UNKNOWN)
- ahd_update_modes(ahd);
-
- return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
-}
-
-static __inline void
-ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
-{
- ahd_mode src;
- ahd_mode dst;
-
- ahd_extract_mode_state(ahd, state, &src, &dst);
- ahd_set_modes(ahd, src, dst);
-}
-
-#define AHD_ASSERT_MODES(ahd, source, dest) \
- ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
-
-/*
- * Determine whether the sequencer has halted code execution.
- * Returns non-zero status if the sequencer is stopped.
- */
-static __inline int
-ahd_is_paused(struct ahd_softc *ahd)
-{
- return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
-}
-
-/*
- * Request that the sequencer stop and wait, indefinitely, for it
- * to stop. The sequencer will only acknowledge that it is paused
- * once it has reached an instruction boundary and PAUSEDIS is
- * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
- * for critical sections.
- */
-static __inline void
-ahd_pause(struct ahd_softc *ahd)
-{
- ahd_outb(ahd, HCNTRL, ahd->pause);
-
- /*
- * Since the sequencer can disable pausing in a critical section, we
- * must loop until it actually stops.
- */
- while (ahd_is_paused(ahd) == 0)
- ;
-}
-
-/*
- * Allow the sequencer to continue program execution.
- * We check here to ensure that no additional interrupt
- * sources that would cause the sequencer to halt have been
- * asserted. If, for example, a SCSI bus reset is detected
- * while we are fielding a different, pausing, interrupt type,
- * we don't want to release the sequencer before going back
- * into our interrupt handler and dealing with this new
- * condition.
- */
-static __inline void
-ahd_unpause(struct ahd_softc *ahd)
-{
- /*
- * Automatically restore our modes to those saved
- * prior to the first change of the mode.
- */
- if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
- && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
- if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
- ahd_reset_cmds_pending(ahd);
- ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
- }
-
- if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
- ahd_outb(ahd, HCNTRL, ahd->unpause);
-
- ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
-}
-
/*********************** Scatter Gather List Handling *************************/
-static __inline void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
- void *sgptr, dma_addr_t addr,
- bus_size_t len, int last);
-static __inline void ahd_setup_scb_common(struct ahd_softc *ahd,
- struct scb *scb);
-static __inline void ahd_setup_data_scb(struct ahd_softc *ahd,
- struct scb *scb);
-static __inline void ahd_setup_noxfer_scb(struct ahd_softc *ahd,
- struct scb *scb);
-
-static __inline void *
-ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
- void *sgptr, dma_addr_t addr, bus_size_t len, int last)
-{
- scb->sg_count++;
- if (sizeof(dma_addr_t) > 4
- && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
- struct ahd_dma64_seg *sg;
-
- sg = (struct ahd_dma64_seg *)sgptr;
- sg->addr = ahd_htole64(addr);
- sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
- return (sg + 1);
- } else {
- struct ahd_dma_seg *sg;
-
- sg = (struct ahd_dma_seg *)sgptr;
- sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
- sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
- | (last ? AHD_DMA_LAST_SEG : 0));
- return (sg + 1);
- }
-}
-
-static __inline void
-ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
-{
- /* XXX Handle target mode SCBs. */
- scb->crc_retry_count = 0;
- if ((scb->flags & SCB_PACKETIZED) != 0) {
- /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */
- scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
- } else {
- if (ahd_get_transfer_length(scb) & 0x01)
- scb->hscb->task_attribute = SCB_XFERLEN_ODD;
- else
- scb->hscb->task_attribute = 0;
- }
-
- if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
- || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
- scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
- ahd_htole32(scb->sense_busaddr);
-}
-
-static __inline void
-ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
-{
- /*
- * Copy the first SG into the "current" data ponter area.
- */
- if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
- struct ahd_dma64_seg *sg;
-
- sg = (struct ahd_dma64_seg *)scb->sg_list;
- scb->hscb->dataptr = sg->addr;
- scb->hscb->datacnt = sg->len;
- } else {
- struct ahd_dma_seg *sg;
- uint32_t *dataptr_words;
-
- sg = (struct ahd_dma_seg *)scb->sg_list;
- dataptr_words = (uint32_t*)&scb->hscb->dataptr;
- dataptr_words[0] = sg->addr;
- dataptr_words[1] = 0;
- if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
- uint64_t high_addr;
-
- high_addr = ahd_le32toh(sg->len) & 0x7F000000;
- scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
- }
- scb->hscb->datacnt = sg->len;
- }
- /*
- * Note where to find the SG entries in bus space.
- * We also set the full residual flag which the
- * sequencer will clear as soon as a data transfer
- * occurs.
- */
- scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
-}
-
-static __inline void
-ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
-{
- scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
- scb->hscb->dataptr = 0;
- scb->hscb->datacnt = 0;
-}
+void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+ void *sgptr, dma_addr_t addr,
+ bus_size_t len, int last);
/************************** Memory mapping routines ***************************/
static __inline size_t ahd_sg_size(struct ahd_softc *ahd);
-static __inline void *
- ahd_sg_bus_to_virt(struct ahd_softc *ahd,
- struct scb *scb,
- uint32_t sg_busaddr);
-static __inline uint32_t
- ahd_sg_virt_to_bus(struct ahd_softc *ahd,
- struct scb *scb,
- void *sg);
-static __inline void ahd_sync_scb(struct ahd_softc *ahd,
- struct scb *scb, int op);
-static __inline void ahd_sync_sglist(struct ahd_softc *ahd,
- struct scb *scb, int op);
-static __inline void ahd_sync_sense(struct ahd_softc *ahd,
- struct scb *scb, int op);
-static __inline uint32_t
- ahd_targetcmd_offset(struct ahd_softc *ahd,
- u_int index);
+
+void ahd_sync_sglist(struct ahd_softc *ahd,
+ struct scb *scb, int op);
static __inline size_t
ahd_sg_size(struct ahd_softc *ahd)
@@ -358,104 +115,32 @@ ahd_sg_size(struct ahd_softc *ahd)
return (sizeof(struct ahd_dma_seg));
}
-static __inline void *
-ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
-{
- dma_addr_t sg_offset;
-
- /* sg_list_phys points to entry 1, not 0 */
- sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
- return ((uint8_t *)scb->sg_list + sg_offset);
-}
-
-static __inline uint32_t
-ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
-{
- dma_addr_t sg_offset;
-
- /* sg_list_phys points to entry 1, not 0 */
- sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
- - ahd_sg_size(ahd);
-
- return (scb->sg_list_busaddr + sg_offset);
-}
-
-static __inline void
-ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
-{
- ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
- scb->hscb_map->dmamap,
- /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
- /*len*/sizeof(*scb->hscb), op);
-}
-
-static __inline void
-ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
-{
- if (scb->sg_count == 0)
- return;
-
- ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
- scb->sg_map->dmamap,
- /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
- /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
-}
-
-static __inline void
-ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
-{
- ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
- scb->sense_map->dmamap,
- /*offset*/scb->sense_busaddr,
- /*len*/AHD_SENSE_BUFSIZE, op);
-}
-
-static __inline uint32_t
-ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
-{
- return (((uint8_t *)&ahd->targetcmds[index])
- - (uint8_t *)ahd->qoutfifo);
-}
-
/*********************** Miscellaneous Support Functions ***********************/
-static __inline struct ahd_initiator_tinfo *
- ahd_fetch_transinfo(struct ahd_softc *ahd,
- char channel, u_int our_id,
- u_int remote_id,
- struct ahd_tmode_tstate **tstate);
-static __inline uint16_t
- ahd_inw(struct ahd_softc *ahd, u_int port);
-static __inline void ahd_outw(struct ahd_softc *ahd, u_int port,
- u_int value);
-static __inline uint32_t
- ahd_inl(struct ahd_softc *ahd, u_int port);
-static __inline void ahd_outl(struct ahd_softc *ahd, u_int port,
- uint32_t value);
-static __inline uint64_t
- ahd_inq(struct ahd_softc *ahd, u_int port);
-static __inline void ahd_outq(struct ahd_softc *ahd, u_int port,
- uint64_t value);
-static __inline u_int ahd_get_scbptr(struct ahd_softc *ahd);
-static __inline void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
-static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd);
-static __inline void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *ahd);
-static __inline void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *ahd);
-static __inline void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *ahd);
-static __inline void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd);
-static __inline void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline uint32_t
- ahd_inl_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline uint64_t
- ahd_inq_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline void ahd_swap_with_next_hscb(struct ahd_softc *ahd,
- struct scb *scb);
-static __inline void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
+struct ahd_initiator_tinfo *
+ ahd_fetch_transinfo(struct ahd_softc *ahd,
+ char channel, u_int our_id,
+ u_int remote_id,
+ struct ahd_tmode_tstate **tstate);
+uint16_t
+ ahd_inw(struct ahd_softc *ahd, u_int port);
+void ahd_outw(struct ahd_softc *ahd, u_int port,
+ u_int value);
+uint32_t
+ ahd_inl(struct ahd_softc *ahd, u_int port);
+void ahd_outl(struct ahd_softc *ahd, u_int port,
+ uint32_t value);
+uint64_t
+ ahd_inq(struct ahd_softc *ahd, u_int port);
+void ahd_outq(struct ahd_softc *ahd, u_int port,
+ uint64_t value);
+u_int ahd_get_scbptr(struct ahd_softc *ahd);
+void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
+u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
+u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
+struct scb *
+ ahd_lookup_scb(struct ahd_softc *ahd, u_int tag);
+void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
+
static __inline uint8_t *
ahd_get_sense_buf(struct ahd_softc *ahd,
struct scb *scb);
@@ -463,25 +148,7 @@ static __inline uint32_t
ahd_get_sense_bufaddr(struct ahd_softc *ahd,
struct scb *scb);
-/*
- * Return pointers to the transfer negotiation information
- * for the specified our_id/remote_id pair.
- */
-static __inline struct ahd_initiator_tinfo *
-ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
- u_int remote_id, struct ahd_tmode_tstate **tstate)
-{
- /*
- * Transfer data structures are stored from the perspective
- * of the target role. Since the parameters for a connection
- * in the initiator role to a given target are the same as
- * when the roles are reversed, we pretend we are the target.
- */
- if (channel == 'B')
- our_id += 8;
- *tstate = ahd->enabled_targets[our_id];
- return (&(*tstate)->transinfo[remote_id]);
-}
+#if 0 /* unused */
#define AHD_COPY_COL_IDX(dst, src) \
do { \
@@ -489,304 +156,7 @@ do { \
dst->hscb->lun = src->hscb->lun; \
} while (0)
-static __inline uint16_t
-ahd_inw(struct ahd_softc *ahd, u_int port)
-{
- /*
- * Read high byte first as some registers increment
- * or have other side effects when the low byte is
- * read.
- */
- uint16_t r = ahd_inb(ahd, port+1) << 8;
- return r | ahd_inb(ahd, port);
-}
-
-static __inline void
-ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
-{
- /*
- * Write low byte first to accomodate registers
- * such as PRGMCNT where the order maters.
- */
- ahd_outb(ahd, port, value & 0xFF);
- ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
-}
-
-static __inline uint32_t
-ahd_inl(struct ahd_softc *ahd, u_int port)
-{
- return ((ahd_inb(ahd, port))
- | (ahd_inb(ahd, port+1) << 8)
- | (ahd_inb(ahd, port+2) << 16)
- | (ahd_inb(ahd, port+3) << 24));
-}
-
-static __inline void
-ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
-{
- ahd_outb(ahd, port, (value) & 0xFF);
- ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
- ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
- ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
-}
-
-static __inline uint64_t
-ahd_inq(struct ahd_softc *ahd, u_int port)
-{
- return ((ahd_inb(ahd, port))
- | (ahd_inb(ahd, port+1) << 8)
- | (ahd_inb(ahd, port+2) << 16)
- | (ahd_inb(ahd, port+3) << 24)
- | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
- | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
- | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
- | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
-}
-
-static __inline void
-ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
-{
- ahd_outb(ahd, port, value & 0xFF);
- ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
- ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
- ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
- ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
- ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
- ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
- ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
-}
-
-static __inline u_int
-ahd_get_scbptr(struct ahd_softc *ahd)
-{
- AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
- ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
- return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
-}
-
-static __inline void
-ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
-{
- AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
- ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
- ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
- ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
-}
-
-static __inline u_int
-ahd_get_hnscb_qoff(struct ahd_softc *ahd)
-{
- return (ahd_inw_atomic(ahd, HNSCB_QOFF));
-}
-
-static __inline void
-ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
-{
- ahd_outw_atomic(ahd, HNSCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_hescb_qoff(struct ahd_softc *ahd)
-{
- return (ahd_inb(ahd, HESCB_QOFF));
-}
-
-static __inline void
-ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
-{
- ahd_outb(ahd, HESCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_snscb_qoff(struct ahd_softc *ahd)
-{
- u_int oldvalue;
-
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- oldvalue = ahd_inw(ahd, SNSCB_QOFF);
- ahd_outw(ahd, SNSCB_QOFF, oldvalue);
- return (oldvalue);
-}
-
-static __inline void
-ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
-{
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- ahd_outw(ahd, SNSCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_sescb_qoff(struct ahd_softc *ahd)
-{
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- return (ahd_inb(ahd, SESCB_QOFF));
-}
-
-static __inline void
-ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
-{
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- ahd_outb(ahd, SESCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_sdscb_qoff(struct ahd_softc *ahd)
-{
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
-}
-
-static __inline void
-ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
-{
- AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
- ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
- ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
-}
-
-static __inline u_int
-ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
-{
- u_int value;
-
- /*
- * Workaround PCI-X Rev A. hardware bug.
- * After a host read of SCB memory, the chip
- * may become confused into thinking prefetch
- * was required. This starts the discard timer
- * running and can cause an unexpected discard
- * timer interrupt. The work around is to read
- * a normal register prior to the exhaustion of
- * the discard timer. The mode pointer register
- * has no side effects and so serves well for
- * this purpose.
- *
- * Razor #528
- */
- value = ahd_inb(ahd, offset);
- if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
- ahd_inb(ahd, MODE_PTR);
- return (value);
-}
-
-static __inline u_int
-ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
-{
- return (ahd_inb_scbram(ahd, offset)
- | (ahd_inb_scbram(ahd, offset+1) << 8));
-}
-
-static __inline uint32_t
-ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
-{
- return (ahd_inw_scbram(ahd, offset)
- | (ahd_inw_scbram(ahd, offset+2) << 16));
-}
-
-static __inline uint64_t
-ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
-{
- return (ahd_inl_scbram(ahd, offset)
- | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
-}
-
-static __inline struct scb *
-ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
-{
- struct scb* scb;
-
- if (tag >= AHD_SCB_MAX)
- return (NULL);
- scb = ahd->scb_data.scbindex[tag];
- if (scb != NULL)
- ahd_sync_scb(ahd, scb,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
- return (scb);
-}
-
-static __inline void
-ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
-{
- struct hardware_scb *q_hscb;
- struct map_node *q_hscb_map;
- uint32_t saved_hscb_busaddr;
-
- /*
- * Our queuing method is a bit tricky. The card
- * knows in advance which HSCB (by address) to download,
- * and we can't disappoint it. To achieve this, the next
- * HSCB to download is saved off in ahd->next_queued_hscb.
- * When we are called to queue "an arbitrary scb",
- * we copy the contents of the incoming HSCB to the one
- * the sequencer knows about, swap HSCB pointers and
- * finally assign the SCB to the tag indexed location
- * in the scb_array. This makes sure that we can still
- * locate the correct SCB by SCB_TAG.
- */
- q_hscb = ahd->next_queued_hscb;
- q_hscb_map = ahd->next_queued_hscb_map;
- saved_hscb_busaddr = q_hscb->hscb_busaddr;
- memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
- q_hscb->hscb_busaddr = saved_hscb_busaddr;
- q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
-
- /* Now swap HSCB pointers. */
- ahd->next_queued_hscb = scb->hscb;
- ahd->next_queued_hscb_map = scb->hscb_map;
- scb->hscb = q_hscb;
- scb->hscb_map = q_hscb_map;
-
- /* Now define the mapping from tag to SCB in the scbindex */
- ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
-}
-
-/*
- * Tell the sequencer about a new transaction to execute.
- */
-static __inline void
-ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
-{
- ahd_swap_with_next_hscb(ahd, scb);
-
- if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
- panic("Attempt to queue invalid SCB tag %x\n",
- SCB_GET_TAG(scb));
-
- /*
- * Keep a history of SCBs we've downloaded in the qinfifo.
- */
- ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
- ahd->qinfifonext++;
-
- if (scb->sg_count != 0)
- ahd_setup_data_scb(ahd, scb);
- else
- ahd_setup_noxfer_scb(ahd, scb);
- ahd_setup_scb_common(ahd, scb);
-
- /*
- * Make sure our data is consistent from the
- * perspective of the adapter.
- */
- ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-
-#ifdef AHD_DEBUG
- if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
- uint64_t host_dataptr;
-
- host_dataptr = ahd_le64toh(scb->hscb->dataptr);
- printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
- ahd_name(ahd),
- SCB_GET_TAG(scb), scb->hscb->scsiid,
- ahd_le32toh(scb->hscb->hscb_busaddr),
- (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
- (u_int)(host_dataptr & 0xFFFFFFFF),
- ahd_le32toh(scb->hscb->datacnt));
- }
#endif
- /* Tell the adapter about the newly queued SCB */
- ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
-}
static __inline uint8_t *
ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
@@ -801,151 +171,6 @@ ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
}
/************************** Interrupt Processing ******************************/
-static __inline void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op);
-static __inline void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op);
-static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd);
-static __inline int ahd_intr(struct ahd_softc *ahd);
-
-static __inline void
-ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
-{
- ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
- /*offset*/0,
- /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
-}
-
-static __inline void
-ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
-{
-#ifdef AHD_TARGET_MODE
- if ((ahd->flags & AHD_TARGETROLE) != 0) {
- ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
- ahd->shared_data_map.dmamap,
- ahd_targetcmd_offset(ahd, 0),
- sizeof(struct target_cmd) * AHD_TMODE_CMDS,
- op);
- }
-#endif
-}
-
-/*
- * See if the firmware has posted any completed commands
- * into our in-core command complete fifos.
- */
-#define AHD_RUN_QOUTFIFO 0x1
-#define AHD_RUN_TQINFIFO 0x2
-static __inline u_int
-ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
-{
- u_int retval;
-
- retval = 0;
- ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
- /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
- /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
- if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
- == ahd->qoutfifonext_valid_tag)
- retval |= AHD_RUN_QOUTFIFO;
-#ifdef AHD_TARGET_MODE
- if ((ahd->flags & AHD_TARGETROLE) != 0
- && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
- ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
- ahd->shared_data_map.dmamap,
- ahd_targetcmd_offset(ahd, ahd->tqinfifofnext),
- /*len*/sizeof(struct target_cmd),
- BUS_DMASYNC_POSTREAD);
- if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
- retval |= AHD_RUN_TQINFIFO;
- }
-#endif
- return (retval);
-}
-
-/*
- * Catch an interrupt from the adapter
- */
-static __inline int
-ahd_intr(struct ahd_softc *ahd)
-{
- u_int intstat;
-
- if ((ahd->pause & INTEN) == 0) {
- /*
- * Our interrupt is not enabled on the chip
- * and may be disabled for re-entrancy reasons,
- * so just return. This is likely just a shared
- * interrupt.
- */
- return (0);
- }
-
- /*
- * Instead of directly reading the interrupt status register,
- * infer the cause of the interrupt by checking our in-core
- * completion queues. This avoids a costly PCI bus read in
- * most cases.
- */
- if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
- && (ahd_check_cmdcmpltqueues(ahd) != 0))
- intstat = CMDCMPLT;
- else
- intstat = ahd_inb(ahd, INTSTAT);
-
- if ((intstat & INT_PEND) == 0)
- return (0);
-
- if (intstat & CMDCMPLT) {
- ahd_outb(ahd, CLRINT, CLRCMDINT);
-
- /*
- * Ensure that the chip sees that we've cleared
- * this interrupt before we walk the output fifo.
- * Otherwise, we may, due to posted bus writes,
- * clear the interrupt after we finish the scan,
- * and after the sequencer has added new entries
- * and asserted the interrupt again.
- */
- if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
- if (ahd_is_paused(ahd)) {
- /*
- * Potentially lost SEQINT.
- * If SEQINTCODE is non-zero,
- * simulate the SEQINT.
- */
- if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
- intstat |= SEQINT;
- }
- } else {
- ahd_flush_device_writes(ahd);
- }
- ahd_run_qoutfifo(ahd);
- ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
- ahd->cmdcmplt_total++;
-#ifdef AHD_TARGET_MODE
- if ((ahd->flags & AHD_TARGETROLE) != 0)
- ahd_run_tqinfifo(ahd, /*paused*/FALSE);
-#endif
- }
-
- /*
- * Handle statuses that may invalidate our cached
- * copy of INTSTAT separately.
- */
- if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
- /* Hot eject. Do nothing */
- } else if (intstat & HWERRINT) {
- ahd_handle_hwerrint(ahd);
- } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
- ahd->bus_intr(ahd);
- } else {
-
- if ((intstat & SEQINT) != 0)
- ahd_handle_seqint(ahd, intstat);
-
- if ((intstat & SCSIINT) != 0)
- ahd_handle_scsiint(ahd, intstat);
- }
- return (1);
-}
+int ahd_intr(struct ahd_softc *ahd);
#endif /* _AIC79XX_INLINE_H_ */
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 0081aa357c8..0f829b3b8ab 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -193,7 +193,7 @@ struct ahd_linux_iocell_opts
#define AIC79XX_PRECOMP_INDEX 0
#define AIC79XX_SLEWRATE_INDEX 1
#define AIC79XX_AMPLITUDE_INDEX 2
-static struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
+static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
{
AIC79XX_DEFAULT_IOOPTS,
AIC79XX_DEFAULT_IOOPTS,
@@ -369,10 +369,167 @@ static void ahd_release_simq(struct ahd_softc *ahd);
static int ahd_linux_unit;
+/************************** OS Utility Wrappers *******************************/
+void ahd_delay(long);
+void
+ahd_delay(long usec)
+{
+ /*
+ * udelay on Linux can have problems for
+ * multi-millisecond waits. Wait at most
+ * 1024us per call.
+ */
+ while (usec > 0) {
+ udelay(usec % 1024);
+ usec -= 1024;
+ }
+}
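
Note the loop body delays usec % 1024 on every pass, so a request that is an exact multiple of 1024us performs no delay at all, and other values under-wait slightly. A sketch of the presumably intended chunking, not part of this patch:

#include <linux/delay.h>

static void
example_delay(long usec)
{
	/* burn at most 1024us per udelay() call */
	while (usec > 0) {
		unsigned int chunk = usec > 1024 ? 1024 : usec;

		udelay(chunk);
		usec -= chunk;
	}
}
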
+
+/***************************** Low Level I/O **********************************/
+uint8_t ahd_inb(struct ahd_softc * ahd, long port);
+void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
+void ahd_outw_atomic(struct ahd_softc * ahd,
+ long port, uint16_t val);
+void ahd_outsb(struct ahd_softc * ahd, long port,
+ uint8_t *, int count);
+void ahd_insb(struct ahd_softc * ahd, long port,
+ uint8_t *, int count);
+
+uint8_t
+ahd_inb(struct ahd_softc * ahd, long port)
+{
+ uint8_t x;
+
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ x = readb(ahd->bshs[0].maddr + port);
+ } else {
+ x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
+ }
+ mb();
+ return (x);
+}
+
+#if 0 /* unused */
+static uint16_t
+ahd_inw_atomic(struct ahd_softc * ahd, long port)
+{
+ uint16_t x;
+
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ x = readw(ahd->bshs[0].maddr + port);
+ } else {
+ x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
+ }
+ mb();
+ return (x);
+}
+#endif
+
+void
+ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
+{
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ writeb(val, ahd->bshs[0].maddr + port);
+ } else {
+ outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
+ }
+ mb();
+}
+
+void
+ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
+{
+ if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+ writew(val, ahd->bshs[0].maddr + port);
+ } else {
+ outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
+ }
+ mb();
+}
+
+void
+ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ ahd_outb(ahd, port, *array++);
+}
+
+void
+ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ *array++ = ahd_inb(ahd, port);
+}
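
All of these accessors dispatch on ahd->tags[0] between memory-mapped and port I/O and finish with mb(), so callers are oblivious to how the BAR was mapped. A hedged read-modify-write illustration using register names that appear elsewhere in this patch (HCNTRL, INTEN); the driver's real interrupt-enable path may differ:

/* set the interrupt-enable bit in HCNTRL, whatever the bus mapping */
static void
example_enable_intr(struct ahd_softc *ahd)
{
	uint8_t hcntrl;

	hcntrl = ahd_inb(ahd, HCNTRL);
	ahd_outb(ahd, HCNTRL, hcntrl | INTEN);
}
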
+
+/******************************* PCI Routines *********************************/
+uint32_t
+ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
+{
+ switch (width) {
+ case 1:
+ {
+ uint8_t retval;
+
+ pci_read_config_byte(pci, reg, &retval);
+ return (retval);
+ }
+ case 2:
+ {
+ uint16_t retval;
+ pci_read_config_word(pci, reg, &retval);
+ return (retval);
+ }
+ case 4:
+ {
+ uint32_t retval;
+ pci_read_config_dword(pci, reg, &retval);
+ return (retval);
+ }
+ default:
+ panic("ahd_pci_read_config: Read size too big");
+ /* NOTREACHED */
+ return (0);
+ }
+}
+
+void
+ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
+{
+ switch (width) {
+ case 1:
+ pci_write_config_byte(pci, reg, value);
+ break;
+ case 2:
+ pci_write_config_word(pci, reg, value);
+ break;
+ case 4:
+ pci_write_config_dword(pci, reg, value);
+ break;
+ default:
+ panic("ahd_pci_write_config: Write size too big");
+ /* NOTREACHED */
+ }
+}
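
Both wrappers just fan out on the byte width to the kernel's pci_read_config_*/pci_write_config_* helpers. Usage mirrors the call visible later in this patch; a sketch assuming a valid ahd->dev_softc:

/* enable memory-space decode via the width-4 wrappers */
uint32_t command;

command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*width*/4);
command |= PCIM_CMD_MEMEN;
ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*width*/4);
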
+
/****************************** Inlines ***************************************/
-static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
+static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
-static __inline void
+static void
ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
{
struct scsi_cmnd *cmd;
@@ -400,13 +557,11 @@ ahd_linux_info(struct Scsi_Host *host)
bp = &buffer[0];
ahd = *(struct ahd_softc **)host->hostdata;
memset(bp, 0, sizeof(buffer));
- strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev ");
- strcat(bp, AIC79XX_DRIVER_VERSION);
- strcat(bp, "\n");
- strcat(bp, " <");
+ strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev " AIC79XX_DRIVER_VERSION "\n"
+ " <");
strcat(bp, ahd->description);
- strcat(bp, ">\n");
- strcat(bp, " ");
+ strcat(bp, ">\n"
+ " ");
ahd_controller_info(ahd, ahd_info);
strcat(bp, ahd_info);
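
The strcpy/strcat chain collapses because C pastes adjacent string literals at compile time, so the fixed framing text and the version macro (which must expand to a string literal) become one literal with no runtime concatenation. A minimal illustration:

/* adjacent literals are concatenated by the compiler: these two
 * statements copy exactly the same bytes */
strcpy(bp, "Rev " "1.0" "\n");
strcpy(bp, "Rev 1.0\n");
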
@@ -432,7 +587,7 @@ ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
return rtn;
}
-static inline struct scsi_target **
+static struct scsi_target **
ahd_linux_target_in_softc(struct scsi_target *starget)
{
struct ahd_softc *ahd =
@@ -991,7 +1146,7 @@ aic79xx_setup(char *s)
char *p;
char *end;
- static struct {
+ static const struct {
const char *name;
uint32_t *flag;
} options[] = {
@@ -1223,7 +1378,7 @@ ahd_platform_init(struct ahd_softc *ahd)
* Lookup and commit any modified IO Cell options.
*/
if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
- struct ahd_linux_iocell_opts *iocell_opts;
+ const struct ahd_linux_iocell_opts *iocell_opts;
iocell_opts = &aic79xx_iocell_info[ahd->unit];
if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
@@ -2613,7 +2768,7 @@ static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
uint8_t precomp;
if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
- struct ahd_linux_iocell_opts *iocell_opts;
+ const struct ahd_linux_iocell_opts *iocell_opts;
iocell_opts = &aic79xx_iocell_info[ahd->unit];
precomp = iocell_opts->precomp;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 853998be147..8d6612c1992 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -222,22 +222,6 @@ typedef struct timer_list ahd_timer_t;
/***************************** Timer Facilities *******************************/
#define ahd_timer_init init_timer
#define ahd_timer_stop del_timer_sync
-typedef void ahd_linux_callback_t (u_long);
-static __inline void ahd_timer_reset(ahd_timer_t *timer, int usec,
- ahd_callback_t *func, void *arg);
-
-static __inline void
-ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
-{
- struct ahd_softc *ahd;
-
- ahd = (struct ahd_softc *)arg;
- del_timer(timer);
- timer->data = (u_long)arg;
- timer->expires = jiffies + (usec * HZ)/1000000;
- timer->function = (ahd_linux_callback_t*)func;
- add_timer(timer);
-}
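
The header loses its open-coded del_timer/add_timer reset; presumably the callers now rely on the kernel's own primitive, since the same effect is a one-liner with mod_timer(). A sketch under that assumption (the timer's function and data must already be initialized, because mod_timer() only updates the expiry):

#include <linux/timer.h>
#include <linux/jiffies.h>

static void
example_timer_reset(struct timer_list *timer, int usec)
{
	mod_timer(timer, jiffies + (usec * HZ) / 1000000);
}
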
/***************************** SMP support ************************************/
#include <linux/spinlock.h>
@@ -376,7 +360,7 @@ struct ahd_platform_data {
#define AHD_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
- uint32_t mem_busaddr; /* Mem Base Addr */
+ resource_size_t mem_busaddr; /* Mem Base Addr */
};
/************************** OS Utility Wrappers *******************************/
@@ -386,111 +370,18 @@ struct ahd_platform_data {
#define malloc(size, type, flags) kmalloc(size, flags)
#define free(ptr, type) kfree(ptr)
-static __inline void ahd_delay(long);
-static __inline void
-ahd_delay(long usec)
-{
- /*
- * udelay on Linux can have problems for
- * multi-millisecond waits. Wait at most
- * 1024us per call.
- */
- while (usec > 0) {
- udelay(usec % 1024);
- usec -= 1024;
- }
-}
-
+void ahd_delay(long);
/***************************** Low Level I/O **********************************/
-static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port);
-static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port);
-static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
-static __inline void ahd_outw_atomic(struct ahd_softc * ahd,
+uint8_t ahd_inb(struct ahd_softc * ahd, long port);
+void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
+void ahd_outw_atomic(struct ahd_softc * ahd,
long port, uint16_t val);
-static __inline void ahd_outsb(struct ahd_softc * ahd, long port,
+void ahd_outsb(struct ahd_softc * ahd, long port,
uint8_t *, int count);
-static __inline void ahd_insb(struct ahd_softc * ahd, long port,
+void ahd_insb(struct ahd_softc * ahd, long port,
uint8_t *, int count);
-static __inline uint8_t
-ahd_inb(struct ahd_softc * ahd, long port)
-{
- uint8_t x;
-
- if (ahd->tags[0] == BUS_SPACE_MEMIO) {
- x = readb(ahd->bshs[0].maddr + port);
- } else {
- x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
- }
- mb();
- return (x);
-}
-
-static __inline uint16_t
-ahd_inw_atomic(struct ahd_softc * ahd, long port)
-{
- uint8_t x;
-
- if (ahd->tags[0] == BUS_SPACE_MEMIO) {
- x = readw(ahd->bshs[0].maddr + port);
- } else {
- x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
- }
- mb();
- return (x);
-}
-
-static __inline void
-ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
-{
- if (ahd->tags[0] == BUS_SPACE_MEMIO) {
- writeb(val, ahd->bshs[0].maddr + port);
- } else {
- outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
- }
- mb();
-}
-
-static __inline void
-ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
-{
- if (ahd->tags[0] == BUS_SPACE_MEMIO) {
- writew(val, ahd->bshs[0].maddr + port);
- } else {
- outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
- }
- mb();
-}
-
-static __inline void
-ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
-{
- int i;
-
- /*
- * There is probably a more efficient way to do this on Linux
- * but we don't use this for anything speed critical and this
- * should work.
- */
- for (i = 0; i < count; i++)
- ahd_outb(ahd, port, *array++);
-}
-
-static __inline void
-ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
-{
- int i;
-
- /*
- * There is probably a more efficient way to do this on Linux
- * but we don't use this for anything speed critical and this
- * should work.
- */
- for (i = 0; i < count; i++)
- *array++ = ahd_inb(ahd, port);
-}
-
/**************************** Initialization **********************************/
int ahd_linux_register_host(struct ahd_softc *,
struct scsi_host_template *);
@@ -593,62 +484,12 @@ void ahd_linux_pci_exit(void);
int ahd_pci_map_registers(struct ahd_softc *ahd);
int ahd_pci_map_int(struct ahd_softc *ahd);
-static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
+uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
int reg, int width);
-
-static __inline uint32_t
-ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
-{
- switch (width) {
- case 1:
- {
- uint8_t retval;
-
- pci_read_config_byte(pci, reg, &retval);
- return (retval);
- }
- case 2:
- {
- uint16_t retval;
- pci_read_config_word(pci, reg, &retval);
- return (retval);
- }
- case 4:
- {
- uint32_t retval;
- pci_read_config_dword(pci, reg, &retval);
- return (retval);
- }
- default:
- panic("ahd_pci_read_config: Read size too big");
- /* NOTREACHED */
- return (0);
- }
-}
-
-static __inline void ahd_pci_write_config(ahd_dev_softc_t pci,
+void ahd_pci_write_config(ahd_dev_softc_t pci,
int reg, uint32_t value,
int width);
-static __inline void
-ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
-{
- switch (width) {
- case 1:
- pci_write_config_byte(pci, reg, value);
- break;
- case 2:
- pci_write_config_word(pci, reg, value);
- break;
- case 4:
- pci_write_config_dword(pci, reg, value);
- break;
- default:
- panic("ahd_pci_write_config: Write size too big");
- /* NOTREACHED */
- }
-}
-
static __inline int ahd_get_pci_function(ahd_dev_softc_t);
static __inline int
ahd_get_pci_function(ahd_dev_softc_t pci)
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index dfaaae5e73a..6593056867f 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -49,7 +49,7 @@
ID2C(x), \
ID2C(IDIROC(x))
-static struct pci_device_id ahd_linux_pci_id_table[] = {
+static const struct pci_device_id ahd_linux_pci_id_table[] = {
/* aic7901 based controllers */
ID(ID_AHA_29320A),
ID(ID_AHA_29320ALP),
@@ -159,7 +159,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
char buf[80];
struct ahd_softc *ahd;
ahd_dev_softc_t pci;
- struct ahd_pci_identity *entry;
+ const struct ahd_pci_identity *entry;
char *name;
int error;
struct device *dev = &pdev->dev;
@@ -249,8 +249,8 @@ ahd_linux_pci_exit(void)
}
static int
-ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base,
- u_long *base2)
+ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, resource_size_t *base,
+ resource_size_t *base2)
{
*base = pci_resource_start(ahd->dev_softc, 0);
/*
@@ -272,11 +272,11 @@ ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base,
static int
ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
- u_long *bus_addr,
+ resource_size_t *bus_addr,
uint8_t __iomem **maddr)
{
- u_long start;
- u_long base_page;
+ resource_size_t start;
+ resource_size_t base_page;
u_long base_offset;
int error = 0;
@@ -310,7 +310,7 @@ int
ahd_pci_map_registers(struct ahd_softc *ahd)
{
uint32_t command;
- u_long base;
+ resource_size_t base;
uint8_t __iomem *maddr;
int error;
@@ -346,31 +346,32 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
} else
command |= PCIM_CMD_MEMEN;
} else if (bootverbose) {
- printf("aic79xx: PCI%d:%d:%d MEM region 0x%lx "
+ printf("aic79xx: PCI%d:%d:%d MEM region 0x%llx "
"unavailable. Cannot memory map device.\n",
ahd_get_pci_bus(ahd->dev_softc),
ahd_get_pci_slot(ahd->dev_softc),
ahd_get_pci_function(ahd->dev_softc),
- base);
+ (unsigned long long)base);
}
if (maddr == NULL) {
- u_long base2;
+ resource_size_t base2;
error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2);
if (error == 0) {
ahd->tags[0] = BUS_SPACE_PIO;
ahd->tags[1] = BUS_SPACE_PIO;
- ahd->bshs[0].ioport = base;
- ahd->bshs[1].ioport = base2;
+ ahd->bshs[0].ioport = (u_long)base;
+ ahd->bshs[1].ioport = (u_long)base2;
command |= PCIM_CMD_PORTEN;
} else {
- printf("aic79xx: PCI%d:%d:%d IO regions 0x%lx and 0x%lx"
- "unavailable. Cannot map device.\n",
+ printf("aic79xx: PCI%d:%d:%d IO regions 0x%llx and "
+ "0x%llx unavailable. Cannot map device.\n",
ahd_get_pci_bus(ahd->dev_softc),
ahd_get_pci_slot(ahd->dev_softc),
ahd_get_pci_function(ahd->dev_softc),
- base, base2);
+ (unsigned long long)base,
+ (unsigned long long)base2);
}
}
ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4);
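
resource_size_t is 32 or 64 bits depending on the platform's configuration, so the format strings above switch to %llx with an explicit cast rather than assuming %lx fits. The same idiom in isolation:

#include <linux/pci.h>

/* print a BAR portably, whatever width resource_size_t has here */
static void
example_print_bar0(struct pci_dev *pdev)
{
	resource_size_t base = pci_resource_start(pdev, 0);

	printk(KERN_INFO "BAR0 at 0x%llx\n", (unsigned long long)base);
}
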
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index c9f79fdf913..c25b6adffbf 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -97,7 +97,7 @@ static ahd_device_setup_t ahd_aic7901A_setup;
static ahd_device_setup_t ahd_aic7902_setup;
static ahd_device_setup_t ahd_aic790X_setup;
-static struct ahd_pci_identity ahd_pci_ident_table [] =
+static const struct ahd_pci_identity ahd_pci_ident_table[] =
{
/* aic7901 based controllers */
{
@@ -253,7 +253,7 @@ static void ahd_configure_termination(struct ahd_softc *ahd,
static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat);
static void ahd_pci_intr(struct ahd_softc *ahd);
-struct ahd_pci_identity *
+const struct ahd_pci_identity *
ahd_find_pci_device(ahd_dev_softc_t pci)
{
uint64_t full_id;
@@ -261,7 +261,7 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
uint16_t vendor;
uint16_t subdevice;
uint16_t subvendor;
- struct ahd_pci_identity *entry;
+ const struct ahd_pci_identity *entry;
u_int i;
vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
@@ -292,7 +292,7 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
}
int
-ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
+ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
{
struct scb_data *shared_scb_data;
u_int command;
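
Constifying ahd_pci_ident_table means every pointer that walks it (ahd_find_pci_device()'s local and return type, ahd_pci_config()'s parameter) must become const too, which is what the signature changes above do. A sketch of such a const-correct walk, with the match fields (full_id, id_mask) assumed from context:

/* return the first table entry whose masked id matches, else NULL */
static const struct ahd_pci_identity *
example_find(uint64_t full_id, const struct ahd_pci_identity *table, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if ((full_id & table[i].id_mask) == table[i].full_id)
			return (&table[i]);
	}
	return (NULL);
}
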
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index 6b28bebcbca..014bed716e7 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -57,7 +57,7 @@ static int ahd_proc_write_seeprom(struct ahd_softc *ahd,
* Table of syncrates that don't follow the "divisible by 4"
* rule. This table will be expanded in future SCSI specs.
*/
-static struct {
+static const struct {
u_int period_factor;
u_int period; /* in 100ths of ns */
} scsi_syncrates[] = {
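
The table lists only the sync rates that break the usual period = factor * 4ns rule, so a lookup falls back to that rule when no exception matches. A sketch against the table above (ARRAY_SIZE from <linux/kernel.h>; the driver's real helper may differ):

/* period in 100ths of ns for a given period factor */
static u_int
example_period(u_int period_factor)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(scsi_syncrates); i++) {
		if (scsi_syncrates[i].period_factor == period_factor)
			return (scsi_syncrates[i].period);
	}
	return (period_factor * 400);	/* 4ns, in 100ths of ns */
}
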
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
index 2068e00d2c7..c21ceab8e91 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -48,13 +48,6 @@ ahd_reg_print_t ahd_error_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrerr_print;
-#else
-#define ahd_clrerr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRERR", 0x04, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_hcntrl_print;
#else
#define ahd_hcntrl_print(regvalue, cur_col, wrap) \
@@ -167,13 +160,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_arbctl_print;
-#else
-#define ahd_arbctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ARBCTL", 0x1b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sg_cache_pre_print;
#else
#define ahd_sg_cache_pre_print(regvalue, cur_col, wrap) \
@@ -188,20 +174,6 @@ ahd_reg_print_t ahd_lqin_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_typeptr_print;
-#else
-#define ahd_typeptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "TYPEPTR", 0x20, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_tagptr_print;
-#else
-#define ahd_tagptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "TAGPTR", 0x21, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_lunptr_print;
#else
#define ahd_lunptr_print(regvalue, cur_col, wrap) \
@@ -209,20 +181,6 @@ ahd_reg_print_t ahd_lunptr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_datalenptr_print;
-#else
-#define ahd_datalenptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DATALENPTR", 0x23, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_statlenptr_print;
-#else
-#define ahd_statlenptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "STATLENPTR", 0x24, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_cmdlenptr_print;
#else
#define ahd_cmdlenptr_print(regvalue, cur_col, wrap) \
@@ -258,13 +216,6 @@ ahd_reg_print_t ahd_qnextptr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_idptr_print;
-#else
-#define ahd_idptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "IDPTR", 0x2a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_abrtbyteptr_print;
#else
#define ahd_abrtbyteptr_print(regvalue, cur_col, wrap) \
@@ -279,27 +230,6 @@ ahd_reg_print_t ahd_abrtbitptr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_maxcmdbytes_print;
-#else
-#define ahd_maxcmdbytes_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "MAXCMDBYTES", 0x2d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_maxcmd2rcv_print;
-#else
-#define ahd_maxcmd2rcv_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "MAXCMD2RCV", 0x2e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_shortthresh_print;
-#else
-#define ahd_shortthresh_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SHORTTHRESH", 0x2f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_lunlen_print;
#else
#define ahd_lunlen_print(regvalue, cur_col, wrap) \
@@ -328,41 +258,6 @@ ahd_reg_print_t ahd_maxcmdcnt_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqrsvd01_print;
-#else
-#define ahd_lqrsvd01_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQRSVD01", 0x34, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqrsvd16_print;
-#else
-#define ahd_lqrsvd16_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQRSVD16", 0x35, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqrsvd17_print;
-#else
-#define ahd_lqrsvd17_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQRSVD17", 0x36, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmdrsvd0_print;
-#else
-#define ahd_cmdrsvd0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMDRSVD0", 0x37, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqctl0_print;
-#else
-#define ahd_lqctl0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQCTL0", 0x38, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_lqctl1_print;
#else
#define ahd_lqctl1_print(regvalue, cur_col, wrap) \
@@ -370,13 +265,6 @@ ahd_reg_print_t ahd_lqctl1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsbist0_print;
-#else
-#define ahd_scsbist0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSBIST0", 0x39, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_lqctl2_print;
#else
#define ahd_lqctl2_print(regvalue, cur_col, wrap) \
@@ -384,13 +272,6 @@ ahd_reg_print_t ahd_lqctl2_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsbist1_print;
-#else
-#define ahd_scsbist1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSBIST1", 0x3a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scsiseq0_print;
#else
#define ahd_scsiseq0_print(regvalue, cur_col, wrap) \
@@ -412,20 +293,6 @@ ahd_reg_print_t ahd_sxfrctl0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dlcount_print;
-#else
-#define ahd_dlcount_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DLCOUNT", 0x3c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_businitid_print;
-#else
-#define ahd_businitid_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "BUSINITID", 0x3c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sxfrctl1_print;
#else
#define ahd_sxfrctl1_print(regvalue, cur_col, wrap) \
@@ -433,20 +300,6 @@ ahd_reg_print_t ahd_sxfrctl1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_bustargid_print;
-#else
-#define ahd_bustargid_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "BUSTARGID", 0x3e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sxfrctl2_print;
-#else
-#define ahd_sxfrctl2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SXFRCTL2", 0x3e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dffstat_print;
#else
#define ahd_dffstat_print(regvalue, cur_col, wrap) \
@@ -454,17 +307,17 @@ ahd_reg_print_t ahd_dffstat_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsisigo_print;
+ahd_reg_print_t ahd_multargid_print;
#else
-#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
+#define ahd_multargid_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_multargid_print;
+ahd_reg_print_t ahd_scsisigo_print;
#else
-#define ahd_multargid_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
+#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -482,13 +335,6 @@ ahd_reg_print_t ahd_scsiphase_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsidat0_img_print;
-#else
-#define ahd_scsidat0_img_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSIDAT0_IMG", 0x43, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scsidat_print;
#else
#define ahd_scsidat_print(regvalue, cur_col, wrap) \
@@ -531,13 +377,6 @@ ahd_reg_print_t ahd_sblkctl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint0_print;
-#else
-#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sstat0_print;
#else
#define ahd_sstat0_print(regvalue, cur_col, wrap) \
@@ -552,10 +391,10 @@ ahd_reg_print_t ahd_simode0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint1_print;
+ahd_reg_print_t ahd_clrsint0_print;
#else
-#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
+#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -566,17 +405,17 @@ ahd_reg_print_t ahd_sstat1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sstat2_print;
+ahd_reg_print_t ahd_clrsint1_print;
#else
-#define ahd_sstat2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
+#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_simode2_print;
+ahd_reg_print_t ahd_sstat2_print;
#else
-#define ahd_simode2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SIMODE2", 0x4d, regvalue, cur_col, wrap)
+#define ahd_sstat2_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -622,17 +461,17 @@ ahd_reg_print_t ahd_lqistat0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqiint0_print;
+ahd_reg_print_t ahd_lqimode0_print;
#else
-#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
+#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqimode0_print;
+ahd_reg_print_t ahd_clrlqiint0_print;
#else
-#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
+#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -790,13 +629,6 @@ ahd_reg_print_t ahd_seqintsrc_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_currscb_print;
-#else
-#define ahd_currscb_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_seqimode_print;
#else
#define ahd_seqimode_print(regvalue, cur_col, wrap) \
@@ -804,24 +636,17 @@ ahd_reg_print_t ahd_seqimode_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_mdffstat_print;
-#else
-#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_crccontrol_print;
+ahd_reg_print_t ahd_currscb_print;
#else
-#define ahd_crccontrol_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CRCCONTROL", 0x5d, regvalue, cur_col, wrap)
+#define ahd_currscb_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfftag_print;
+ahd_reg_print_t ahd_mdffstat_print;
#else
-#define ahd_dfftag_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFFTAG", 0x5e, regvalue, cur_col, wrap)
+#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -832,20 +657,6 @@ ahd_reg_print_t ahd_lastscb_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsitest_print;
-#else
-#define ahd_scsitest_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSITEST", 0x5e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_iopdnctl_print;
-#else
-#define ahd_iopdnctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "IOPDNCTL", 0x5f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_shaddr_print;
#else
#define ahd_shaddr_print(regvalue, cur_col, wrap) \
@@ -860,13 +671,6 @@ ahd_reg_print_t ahd_negoaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dgrpcrci_print;
-#else
-#define ahd_dgrpcrci_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DGRPCRCI", 0x60, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_negperiod_print;
#else
#define ahd_negperiod_print(regvalue, cur_col, wrap) \
@@ -874,13 +678,6 @@ ahd_reg_print_t ahd_negperiod_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_packcrci_print;
-#else
-#define ahd_packcrci_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PACKCRCI", 0x62, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_negoffset_print;
#else
#define ahd_negoffset_print(regvalue, cur_col, wrap) \
@@ -930,13 +727,6 @@ ahd_reg_print_t ahd_iownid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll960ctl0_print;
-#else
-#define ahd_pll960ctl0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL960CTL0", 0x68, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_shcnt_print;
#else
#define ahd_shcnt_print(regvalue, cur_col, wrap) \
@@ -951,27 +741,6 @@ ahd_reg_print_t ahd_townid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll960ctl1_print;
-#else
-#define ahd_pll960ctl1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL960CTL1", 0x69, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll960cnt0_print;
-#else
-#define ahd_pll960cnt0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL960CNT0", 0x6a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_xsig_print;
-#else
-#define ahd_xsig_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "XSIG", 0x6a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_seloid_print;
#else
#define ahd_seloid_print(regvalue, cur_col, wrap) \
@@ -979,41 +748,6 @@ ahd_reg_print_t ahd_seloid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll400ctl0_print;
-#else
-#define ahd_pll400ctl0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL400CTL0", 0x6c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_fairness_print;
-#else
-#define ahd_fairness_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FAIRNESS", 0x6c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll400ctl1_print;
-#else
-#define ahd_pll400ctl1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL400CTL1", 0x6d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_unfairness_print;
-#else
-#define ahd_unfairness_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "UNFAIRNESS", 0x6e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll400cnt0_print;
-#else
-#define ahd_pll400cnt0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLL400CNT0", 0x6e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_haddr_print;
#else
#define ahd_haddr_print(regvalue, cur_col, wrap) \
@@ -1021,27 +755,6 @@ ahd_reg_print_t ahd_haddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_plldelay_print;
-#else
-#define ahd_plldelay_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "PLLDELAY", 0x70, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hodmaadr_print;
-#else
-#define ahd_hodmaadr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "HODMAADR", 0x70, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hodmacnt_print;
-#else
-#define ahd_hodmacnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "HODMACNT", 0x78, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_hcnt_print;
#else
#define ahd_hcnt_print(regvalue, cur_col, wrap) \
@@ -1049,10 +762,10 @@ ahd_reg_print_t ahd_hcnt_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hodmaen_print;
+ahd_reg_print_t ahd_sghaddr_print;
#else
-#define ahd_hodmaen_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "HODMAEN", 0x7a, regvalue, cur_col, wrap)
+#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -1063,10 +776,10 @@ ahd_reg_print_t ahd_scbhaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghaddr_print;
+ahd_reg_print_t ahd_sghcnt_print;
#else
-#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
+#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -1077,13 +790,6 @@ ahd_reg_print_t ahd_scbhcnt_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghcnt_print;
-#else
-#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dff_thrsh_print;
#else
#define ahd_dff_thrsh_print(regvalue, cur_col, wrap) \
@@ -1091,132 +797,6 @@ ahd_reg_print_t ahd_dff_thrsh_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_romaddr_print;
-#else
-#define ahd_romaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ROMADDR", 0x8a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_romcntrl_print;
-#else
-#define ahd_romcntrl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ROMCNTRL", 0x8d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_romdata_print;
-#else
-#define ahd_romdata_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ROMDATA", 0x8e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg0_print;
-#else
-#define ahd_cmcrxmsg0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCRXMSG0", 0x90, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_roenable_print;
-#else
-#define ahd_roenable_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ROENABLE", 0x90, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg0_print;
-#else
-#define ahd_ovlyrxmsg0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYRXMSG0", 0x90, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg0_print;
-#else
-#define ahd_dchrxmsg0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DCHRXMSG0", 0x90, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg1_print;
-#else
-#define ahd_ovlyrxmsg1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYRXMSG1", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_nsenable_print;
-#else
-#define ahd_nsenable_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "NSENABLE", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg1_print;
-#else
-#define ahd_cmcrxmsg1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCRXMSG1", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg1_print;
-#else
-#define ahd_dchrxmsg1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DCHRXMSG1", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg2_print;
-#else
-#define ahd_dchrxmsg2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DCHRXMSG2", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg2_print;
-#else
-#define ahd_cmcrxmsg2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCRXMSG2", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ost_print;
-#else
-#define ahd_ost_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OST", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg2_print;
-#else
-#define ahd_ovlyrxmsg2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYRXMSG2", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg3_print;
-#else
-#define ahd_dchrxmsg3_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DCHRXMSG3", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg3_print;
-#else
-#define ahd_ovlyrxmsg3_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYRXMSG3", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg3_print;
-#else
-#define ahd_cmcrxmsg3_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCRXMSG3", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_pcixctl_print;
#else
#define ahd_pcixctl_print(regvalue, cur_col, wrap) \
@@ -1224,34 +804,6 @@ ahd_reg_print_t ahd_pcixctl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyseqbcnt_print;
-#else
-#define ahd_ovlyseqbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYSEQBCNT", 0x94, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchseqbcnt_print;
-#else
-#define ahd_dchseqbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DCHSEQBCNT", 0x94, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcseqbcnt_print;
-#else
-#define ahd_cmcseqbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCSEQBCNT", 0x94, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcspltstat0_print;
-#else
-#define ahd_cmcspltstat0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dchspltstat0_print;
#else
#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \
@@ -1259,27 +811,6 @@ ahd_reg_print_t ahd_dchspltstat0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyspltstat0_print;
-#else
-#define ahd_ovlyspltstat0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcspltstat1_print;
-#else
-#define ahd_cmcspltstat1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyspltstat1_print;
-#else
-#define ahd_ovlyspltstat1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dchspltstat1_print;
#else
#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \
@@ -1287,90 +818,6 @@ ahd_reg_print_t ahd_dchspltstat1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg0_print;
-#else
-#define ahd_sgrxmsg0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGRXMSG0", 0x98, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr0_print;
-#else
-#define ahd_slvspltoutadr0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTADR0", 0x98, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg1_print;
-#else
-#define ahd_sgrxmsg1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGRXMSG1", 0x99, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr1_print;
-#else
-#define ahd_slvspltoutadr1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTADR1", 0x99, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg2_print;
-#else
-#define ahd_sgrxmsg2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGRXMSG2", 0x9a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr2_print;
-#else
-#define ahd_slvspltoutadr2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTADR2", 0x9a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg3_print;
-#else
-#define ahd_sgrxmsg3_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGRXMSG3", 0x9b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr3_print;
-#else
-#define ahd_slvspltoutadr3_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTADR3", 0x9b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgseqbcnt_print;
-#else
-#define ahd_sgseqbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGSEQBCNT", 0x9c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutattr0_print;
-#else
-#define ahd_slvspltoutattr0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTATTR0", 0x9c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutattr1_print;
-#else
-#define ahd_slvspltoutattr1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTATTR1", 0x9d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutattr2_print;
-#else
-#define ahd_slvspltoutattr2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SLVSPLTOUTATTR2", 0x9e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sgspltstat0_print;
#else
#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \
@@ -1385,13 +832,6 @@ ahd_reg_print_t ahd_sgspltstat1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sfunct_print;
-#else
-#define ahd_sfunct_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SFUNCT", 0x9f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_df0pcistat_print;
#else
#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \
@@ -1406,41 +846,6 @@ ahd_reg_print_t ahd_reg0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_df1pcistat_print;
-#else
-#define ahd_df1pcistat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DF1PCISTAT", 0xa1, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgpcistat_print;
-#else
-#define ahd_sgpcistat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGPCISTAT", 0xa2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_reg1_print;
-#else
-#define ahd_reg1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "REG1", 0xa2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcpcistat_print;
-#else
-#define ahd_cmcpcistat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMCPCISTAT", 0xa3, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlypcistat_print;
-#else
-#define ahd_ovlypcistat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYPCISTAT", 0xa4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_reg_isr_print;
#else
#define ahd_reg_isr_print(regvalue, cur_col, wrap) \
@@ -1455,13 +860,6 @@ ahd_reg_print_t ahd_sg_state_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_msipcistat_print;
-#else
-#define ahd_msipcistat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "MSIPCISTAT", 0xa6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_targpcistat_print;
#else
#define ahd_targpcistat_print(regvalue, cur_col, wrap) \
@@ -1469,13 +867,6 @@ ahd_reg_print_t ahd_targpcistat_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_data_count_odd_print;
-#else
-#define ahd_data_count_odd_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DATA_COUNT_ODD", 0xa7, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scbptr_print;
#else
#define ahd_scbptr_print(regvalue, cur_col, wrap) \
@@ -1483,13 +874,6 @@ ahd_reg_print_t ahd_scbptr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbacnt_print;
-#else
-#define ahd_ccscbacnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CCSCBACNT", 0xab, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scbautoptr_print;
#else
#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
@@ -1504,13 +888,6 @@ ahd_reg_print_t ahd_ccsgaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbadr_bk_print;
-#else
-#define ahd_ccscbadr_bk_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CCSCBADR_BK", 0xac, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_ccscbaddr_print;
#else
#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
@@ -1518,13 +895,6 @@ ahd_reg_print_t ahd_ccscbaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmc_rambist_print;
-#else
-#define ahd_cmc_rambist_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CMC_RAMBIST", 0xad, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_ccscbctl_print;
#else
#define ahd_ccscbctl_print(regvalue, cur_col, wrap) \
@@ -1546,13 +916,6 @@ ahd_reg_print_t ahd_ccsgram_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexadr_print;
-#else
-#define ahd_flexadr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FLEXADR", 0xb0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_ccscbram_print;
#else
#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
@@ -1560,27 +923,6 @@ ahd_reg_print_t ahd_ccscbram_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexcnt_print;
-#else
-#define ahd_flexcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FLEXCNT", 0xb3, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexdmastat_print;
-#else
-#define ahd_flexdmastat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FLEXDMASTAT", 0xb5, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexdata_print;
-#else
-#define ahd_flexdata_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FLEXDATA", 0xb6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_brddat_print;
#else
#define ahd_brddat_print(regvalue, cur_col, wrap) \
@@ -1623,27 +965,6 @@ ahd_reg_print_t ahd_seestat_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scbcnt_print;
-#else
-#define ahd_scbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCBCNT", 0xbf, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfwaddr_print;
-#else
-#define ahd_dfwaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFWADDR", 0xc0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspfltrctl_print;
-#else
-#define ahd_dspfltrctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DSPFLTRCTL", 0xc0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dspdatactl_print;
#else
#define ahd_dspdatactl_print(regvalue, cur_col, wrap) \
@@ -1651,27 +972,6 @@ ahd_reg_print_t ahd_dspdatactl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfraddr_print;
-#else
-#define ahd_dfraddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFRADDR", 0xc2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspreqctl_print;
-#else
-#define ahd_dspreqctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DSPREQCTL", 0xc2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspackctl_print;
-#else
-#define ahd_dspackctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DSPACKCTL", 0xc3, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dfdat_print;
#else
#define ahd_dfdat_print(regvalue, cur_col, wrap) \
@@ -1693,76 +993,6 @@ ahd_reg_print_t ahd_wrtbiasctl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_rcvrbiosctl_print;
-#else
-#define ahd_rcvrbiosctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "RCVRBIOSCTL", 0xc6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_wrtbiascalc_print;
-#else
-#define ahd_wrtbiascalc_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "WRTBIASCALC", 0xc7, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_rcvrbiascalc_print;
-#else
-#define ahd_rcvrbiascalc_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "RCVRBIASCALC", 0xc8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfptrs_print;
-#else
-#define ahd_dfptrs_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFPTRS", 0xc8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_skewcalc_print;
-#else
-#define ahd_skewcalc_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SKEWCALC", 0xc9, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfbkptr_print;
-#else
-#define ahd_dfbkptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFBKPTR", 0xc9, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfdbctl_print;
-#else
-#define ahd_dfdbctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFDBCTL", 0xcb, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfscnt_print;
-#else
-#define ahd_dfscnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFSCNT", 0xcc, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfbcnt_print;
-#else
-#define ahd_dfbcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFBCNT", 0xce, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyaddr_print;
-#else
-#define ahd_ovlyaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OVLYADDR", 0xd4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_seqctl0_print;
#else
#define ahd_seqctl0_print(regvalue, cur_col, wrap) \
@@ -1770,13 +1000,6 @@ ahd_reg_print_t ahd_seqctl0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqctl1_print;
-#else
-#define ahd_seqctl1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SEQCTL1", 0xd7, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_flags_print;
#else
#define ahd_flags_print(regvalue, cur_col, wrap) \
@@ -1826,20 +1049,6 @@ ahd_reg_print_t ahd_dindex_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_brkaddr0_print;
-#else
-#define ahd_brkaddr0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "BRKADDR0", 0xe6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_brkaddr1_print;
-#else
-#define ahd_brkaddr1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "BRKADDR1", 0xe6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_allones_print;
#else
#define ahd_allones_print(regvalue, cur_col, wrap) \
@@ -1875,13 +1084,6 @@ ahd_reg_print_t ahd_dindir_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_function1_print;
-#else
-#define ahd_function1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FUNCTION1", 0xf0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_stack_print;
#else
#define ahd_stack_print(regvalue, cur_col, wrap) \
@@ -1903,13 +1105,6 @@ ahd_reg_print_t ahd_curaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lastaddr_print;
-#else
-#define ahd_lastaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LASTADDR", 0xf6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_intvec2_addr_print;
#else
#define ahd_intvec2_addr_print(regvalue, cur_col, wrap) \
@@ -1931,24 +1126,17 @@ ahd_reg_print_t ahd_accum_save_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_waiting_scb_tails_print;
-#else
-#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ahd_pci_config_base_print;
+ahd_reg_print_t ahd_sram_base_print;
#else
-#define ahd_ahd_pci_config_base_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE", 0x100, regvalue, cur_col, wrap)
+#define ahd_sram_base_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sram_base_print;
+ahd_reg_print_t ahd_waiting_scb_tails_print;
#else
-#define ahd_sram_base_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
+#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -2218,17 +1406,17 @@ ahd_reg_print_t ahd_mk_message_scsiid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_base_print;
+ahd_reg_print_t ahd_scb_residual_datacnt_print;
#else
-#define ahd_scb_base_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
+#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_residual_datacnt_print;
+ahd_reg_print_t ahd_scb_base_print;
#else
-#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
+#define ahd_scb_base_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -2246,27 +1434,6 @@ ahd_reg_print_t ahd_scb_scsi_status_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_target_phases_print;
-#else
-#define ahd_scb_target_phases_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_TARGET_PHASES", 0x189, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_target_data_dir_print;
-#else
-#define ahd_scb_target_data_dir_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR", 0x18a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_target_itag_print;
-#else
-#define ahd_scb_target_itag_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_TARGET_ITAG", 0x18b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scb_sense_busaddr_print;
#else
#define ahd_scb_sense_busaddr_print(regvalue, cur_col, wrap) \
@@ -2365,13 +1532,6 @@ ahd_reg_print_t ahd_scb_next2_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_spare_print;
-#else
-#define ahd_scb_spare_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_SPARE", 0x1b0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scb_disconnected_lists_print;
#else
#define ahd_scb_disconnected_lists_print(regvalue, cur_col, wrap) \
@@ -2557,10 +1717,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SG_CACHE_PRE 0x1b
-#define LQIN 0x20
-
#define TYPEPTR 0x20
+#define LQIN 0x20
+
#define TAGPTR 0x21
#define LUNPTR 0x22
@@ -2620,14 +1780,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SINGLECMD 0x02
#define ABORTPENDING 0x01
-#define SCSBIST0 0x39
-#define GSBISTERR 0x40
-#define GSBISTDONE 0x20
-#define GSBISTRUN 0x10
-#define OSBISTERR 0x04
-#define OSBISTDONE 0x02
-#define OSBISTRUN 0x01
-
#define LQCTL2 0x39
#define LQIRETRY 0x80
#define LQICONTINUE 0x40
@@ -2638,10 +1790,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LQOTOIDLE 0x02
#define LQOPAUSE 0x01
-#define SCSBIST1 0x3a
-#define NTBISTERR 0x04
-#define NTBISTDONE 0x02
-#define NTBISTRUN 0x01
+#define SCSBIST0 0x39
+#define GSBISTERR 0x40
+#define GSBISTDONE 0x20
+#define GSBISTRUN 0x10
+#define OSBISTERR 0x04
+#define OSBISTDONE 0x02
+#define OSBISTRUN 0x01
#define SCSISEQ0 0x3a
#define TEMODEO 0x80
@@ -2650,8 +1805,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define FORCEBUSFREE 0x10
#define SCSIRSTO 0x01
+#define SCSBIST1 0x3a
+#define NTBISTERR 0x04
+#define NTBISTDONE 0x02
+#define NTBISTRUN 0x01
+
#define SCSISEQ1 0x3b
+#define BUSINITID 0x3c
+
#define SXFRCTL0 0x3c
#define DFON 0x80
#define DFPEXP 0x40
@@ -2660,8 +1822,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DLCOUNT 0x3c
-#define BUSINITID 0x3c
-
#define SXFRCTL1 0x3d
#define BITBUCKET 0x80
#define ENSACHK 0x40
@@ -2686,6 +1846,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CURRFIFO_1 0x01
#define CURRFIFO_0 0x00
+#define MULTARGID 0x40
+
#define SCSISIGO 0x40
#define CDO 0x80
#define IOO 0x40
@@ -2696,8 +1858,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define REQO 0x02
#define ACKO 0x01
-#define MULTARGID 0x40
-
#define SCSISIGI 0x41
#define ATNI 0x10
#define SELI 0x08
@@ -2744,15 +1904,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENAB20 0x04
#define SELWIDE 0x02
-#define CLRSINT0 0x4b
-#define CLRSELDO 0x40
-#define CLRSELDI 0x20
-#define CLRSELINGO 0x10
-#define CLRIOERR 0x08
-#define CLROVERRUN 0x04
-#define CLRSPIORDY 0x02
-#define CLRARBDO 0x01
-
#define SSTAT0 0x4b
#define TARGET 0x80
#define SELDO 0x40
@@ -2772,14 +1923,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENSPIORDY 0x02
#define ENARBDO 0x01
-#define CLRSINT1 0x4c
-#define CLRSELTIMEO 0x80
-#define CLRATNO 0x40
-#define CLRSCSIRSTI 0x20
-#define CLRBUSFREE 0x08
-#define CLRSCSIPERR 0x04
-#define CLRSTRB2FAST 0x02
-#define CLRREQINIT 0x01
+#define CLRSINT0 0x4b
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRIOERR 0x08
+#define CLROVERRUN 0x04
+#define CLRSPIORDY 0x02
+#define CLRARBDO 0x01
#define SSTAT1 0x4c
#define SELTO 0x80
@@ -2791,6 +1942,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define STRB2FAST 0x02
#define REQINIT 0x01
+#define CLRSINT1 0x4c
+#define CLRSELTIMEO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRSTRB2FAST 0x02
+#define CLRREQINIT 0x01
+
#define SSTAT2 0x4d
#define BUSFREETIME 0xc0
#define NONPACKREQ 0x20
@@ -2838,14 +1998,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LQIATNLQ 0x02
#define LQIATNCMD 0x01
-#define CLRLQIINT0 0x50
-#define CLRLQIATNQAS 0x20
-#define CLRLQICRCT1 0x10
-#define CLRLQICRCT2 0x08
-#define CLRLQIBADLQT 0x04
-#define CLRLQIATNLQ 0x02
-#define CLRLQIATNCMD 0x01
-
#define LQIMODE0 0x50
#define ENLQIATNQASK 0x20
#define ENLQICRCT1 0x10
@@ -2854,6 +2006,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENLQIATNLQ 0x02
#define ENLQIATNCMD 0x01
+#define CLRLQIINT0 0x50
+#define CLRLQIATNQAS 0x20
+#define CLRLQICRCT1 0x10
+#define CLRLQICRCT2 0x08
+#define CLRLQIBADLQT 0x04
+#define CLRLQIATNLQ 0x02
+#define CLRLQIATNCMD 0x01
+
#define LQIMODE1 0x51
#define ENLQIPHASE_LQ 0x80
#define ENLQIPHASE_NLQ 0x40
@@ -2976,6 +2136,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LQOSCSCTL 0x5a
#define LQOH2A_VERSION 0x80
+#define LQOBUSETDLY 0x40
+#define LQONOHOLDLACK 0x02
#define LQONOCHKOVER 0x01
#define NEXTSCB 0x5a
@@ -2998,8 +2160,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CFG4ICMD 0x02
#define CFG4TCMD 0x01
-#define CURRSCB 0x5c
-
#define SEQIMODE 0x5c
#define ENCTXTDONE 0x40
#define ENSAVEPTRS 0x20
@@ -3009,6 +2169,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENCFG4ICMD 0x02
#define ENCFG4TCMD 0x01
+#define CURRSCB 0x5c
+
#define MDFFSTAT 0x5d
#define SHCNTNEGATIVE 0x40
#define SHCNTMINUS1 0x20
@@ -3023,29 +2185,29 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DFFTAG 0x5e
-#define LASTSCB 0x5e
-
#define SCSITEST 0x5e
#define CNTRTEST 0x08
#define SEL_TXPLL_DEBUG 0x04
+#define LASTSCB 0x5e
+
#define IOPDNCTL 0x5f
#define DISABLE_OE 0x80
#define PDN_IDIST 0x04
#define PDN_DIFFSENSE 0x01
+#define DGRPCRCI 0x60
+
#define SHADDR 0x60
#define NEGOADDR 0x60
-#define DGRPCRCI 0x60
-
#define NEGPERIOD 0x61
-#define PACKCRCI 0x62
-
#define NEGOFFSET 0x62
+#define PACKCRCI 0x62
+
#define NEGPPROPTS 0x63
#define PPROPT_PACE 0x08
#define PPROPT_QAS 0x04
@@ -3066,6 +2228,7 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ANNEXDAT 0x66
#define SCSCHKN 0x66
+#define BIDICHKDIS 0x80
#define STSELSKIDDIS 0x40
#define CURRFIFODEF 0x20
#define WIDERESEN 0x10
@@ -3090,6 +2253,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SELOID 0x6b
+#define FAIRNESS 0x6c
+
#define PLL400CTL0 0x6c
#define PLL_VCOSEL 0x80
#define PLL_PWDN 0x40
@@ -3099,8 +2264,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define PLL_DLPF 0x02
#define PLL_ENFBM 0x01
-#define FAIRNESS 0x6c
-
#define PLL400CTL1 0x6d
#define PLL_CNTEN 0x80
#define PLL_CNTCLR 0x40
@@ -3112,25 +2275,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define HADDR 0x70
+#define HODMAADR 0x70
+
#define PLLDELAY 0x70
#define SPLIT_DROP_REQ 0x80
-#define HODMAADR 0x70
+#define HCNT 0x78
#define HODMACNT 0x78
-#define HCNT 0x78
-
#define HODMAEN 0x7a
-#define SCBHADDR 0x7c
-
#define SGHADDR 0x7c
-#define SCBHCNT 0x84
+#define SCBHADDR 0x7c
#define SGHCNT 0x84
+#define SCBHCNT 0x84
+
#define DFF_THRSH 0x88
#define WR_DFTHRSH 0x70
#define RD_DFTHRSH 0x07
@@ -3163,6 +2326,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CMCRXMSG0 0x90
+#define OVLYRXMSG0 0x90
+
+#define DCHRXMSG0 0x90
+
#define ROENABLE 0x90
#define MSIROEN 0x20
#define OVLYROEN 0x10
@@ -3171,11 +2338,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DCH1ROEN 0x02
#define DCH0ROEN 0x01
-#define OVLYRXMSG0 0x90
+#define OVLYRXMSG1 0x91
-#define DCHRXMSG0 0x90
+#define CMCRXMSG1 0x91
-#define OVLYRXMSG1 0x91
+#define DCHRXMSG1 0x91
#define NSENABLE 0x91
#define MSINSEN 0x20
@@ -3185,10 +2352,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DCH1NSEN 0x02
#define DCH0NSEN 0x01
-#define CMCRXMSG1 0x91
-
-#define DCHRXMSG1 0x91
-
#define DCHRXMSG2 0x92
#define CMCRXMSG2 0x92
@@ -3212,24 +2375,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define TSCSERREN 0x02
#define CMPABCDIS 0x01
+#define CMCSEQBCNT 0x94
+
#define OVLYSEQBCNT 0x94
#define DCHSEQBCNT 0x94
-#define CMCSEQBCNT 0x94
-
-#define CMCSPLTSTAT0 0x96
-
#define DCHSPLTSTAT0 0x96
#define OVLYSPLTSTAT0 0x96
-#define CMCSPLTSTAT1 0x97
+#define CMCSPLTSTAT0 0x96
#define OVLYSPLTSTAT1 0x97
#define DCHSPLTSTAT1 0x97
+#define CMCSPLTSTAT1 0x97
+
#define SGRXMSG0 0x98
#define CDNUM 0xf8
#define CFNUM 0x07
@@ -3257,18 +2420,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define TAG_NUM 0x1f
#define RLXORD 0x10
-#define SGSEQBCNT 0x9c
-
#define SLVSPLTOUTATTR0 0x9c
#define LOWER_BCNT 0xff
+#define SGSEQBCNT 0x9c
+
#define SLVSPLTOUTATTR1 0x9d
#define CMPLT_DNUM 0xf8
#define CMPLT_FNUM 0x07
-#define SLVSPLTOUTATTR2 0x9e
-#define CMPLT_BNUM 0xff
-
#define SGSPLTSTAT0 0x9e
#define STAETERM 0x80
#define SCBCERR 0x40
@@ -3279,6 +2439,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define RXSCEMSG 0x02
#define RXSPLTRSP 0x01
+#define SLVSPLTOUTATTR2 0x9e
+#define CMPLT_BNUM 0xff
+
#define SGSPLTSTAT1 0x9f
#define RXDATABUCKET 0x01
@@ -3334,10 +2497,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CCSGADDR 0xac
-#define CCSCBADR_BK 0xac
-
#define CCSCBADDR 0xac
+#define CCSCBADR_BK 0xac
+
#define CMC_RAMBIST 0xad
#define SG_ELEMENT_SIZE 0x80
#define SCBRAMBIST_FAIL 0x40
@@ -3391,9 +2554,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SEEDAT 0xbc
#define SEECTL 0xbe
+#define SEEOP_EWDS 0x40
#define SEEOP_WALL 0x40
#define SEEOP_EWEN 0x40
-#define SEEOP_EWDS 0x40
#define SEEOPCODE 0x70
#define SEERST 0x02
#define SEESTART 0x01
@@ -3410,25 +2573,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SCBCNT 0xbf
-#define DFWADDR 0xc0
-
#define DSPFLTRCTL 0xc0
#define FLTRDISABLE 0x20
#define EDGESENSE 0x10
#define DSPFCNTSEL 0x0f
+#define DFWADDR 0xc0
+
#define DSPDATACTL 0xc1
#define BYPASSENAB 0x80
#define DESQDIS 0x10
#define RCVROFFSTDIS 0x04
#define XMITOFFSTDIS 0x02
-#define DFRADDR 0xc2
-
#define DSPREQCTL 0xc2
#define MANREQCTL 0xc0
#define MANREQDLY 0x3f
+#define DFRADDR 0xc2
+
#define DSPACKCTL 0xc3
#define MANACKCTL 0xc0
#define MANACKDLY 0x3f
@@ -3449,14 +2612,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define WRTBIASCALC 0xc7
-#define RCVRBIASCALC 0xc8
-
#define DFPTRS 0xc8
-#define SKEWCALC 0xc9
+#define RCVRBIASCALC 0xc8
#define DFBKPTR 0xc9
+#define SKEWCALC 0xc9
+
#define DFDBCTL 0xcb
#define DFF_CIO_WR_RDY 0x20
#define DFF_CIO_RD_RDY 0x10
@@ -3541,12 +2704,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ACCUM_SAVE 0xfa
-#define WAITING_SCB_TAILS 0x100
-
#define AHD_PCI_CONFIG_BASE 0x100
#define SRAM_BASE 0x100
+#define WAITING_SCB_TAILS 0x100
+
#define WAITING_TID_HEAD 0x120
#define WAITING_TID_TAIL 0x122
@@ -3575,8 +2738,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define PRELOADEN 0x80
#define WIDEODD 0x40
#define SCSIEN 0x20
-#define SDMAEN 0x10
#define SDMAENACK 0x10
+#define SDMAEN 0x10
#define HDMAEN 0x08
#define HDMAENACK 0x08
#define DIRECTION 0x04
@@ -3674,12 +2837,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define MK_MESSAGE_SCSIID 0x162
-#define SCB_BASE 0x180
-
#define SCB_RESIDUAL_DATACNT 0x180
#define SCB_CDB_STORE 0x180
#define SCB_HOST_CDB_PTR 0x180
+#define SCB_BASE 0x180
+
#define SCB_RESIDUAL_SGPTR 0x184
#define SG_ADDR_MASK 0xf8
#define SG_OVERRUN_RESID 0x02
@@ -3747,6 +2910,17 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SCB_DISCONNECTED_LISTS 0x1b8
+#define CMD_GROUP_CODE_SHIFT 0x05
+#define STIMESEL_MIN 0x18
+#define STIMESEL_SHIFT 0x03
+#define INVALID_ADDR 0x80
+#define AHD_PRECOMP_MASK 0x07
+#define TARGET_DATA_IN 0x01
+#define CCSCBADDR_MAX 0x80
+#define NUMDSPS 0x14
+#define SEEOP_EWEN_ADDR 0xc0
+#define AHD_ANNEXCOL_PER_DEV0 0x04
+#define DST_MODE_SHIFT 0x04
#define AHD_TIMER_MAX_US 0x18ffe7
#define AHD_TIMER_MAX_TICKS 0xffff
#define AHD_SENSE_BUFSIZE 0x100
@@ -3781,43 +2955,32 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LUNLEN_SINGLE_LEVEL_LUN 0x0f
#define NVRAM_SCB_OFFSET 0x2c
#define STATUS_PKT_SENSE 0xff
-#define CMD_GROUP_CODE_SHIFT 0x05
#define MAX_OFFSET_PACED_BUG 0x7f
#define STIMESEL_BUG_ADJ 0x08
-#define STIMESEL_MIN 0x18
-#define STIMESEL_SHIFT 0x03
#define CCSGRAM_MAXSEGS 0x10
-#define INVALID_ADDR 0x80
#define SEEOP_ERAL_ADDR 0x80
#define AHD_SLEWRATE_DEF_REVB 0x08
#define AHD_PRECOMP_CUTBACK_17 0x04
-#define AHD_PRECOMP_MASK 0x07
#define SRC_MODE_SHIFT 0x00
#define PKT_OVERRUN_BUFSIZE 0x200
#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30
-#define TARGET_DATA_IN 0x01
#define HOST_MSG 0xff
#define MAX_OFFSET 0xfe
#define BUS_16_BIT 0x01
-#define CCSCBADDR_MAX 0x80
-#define NUMDSPS 0x14
-#define SEEOP_EWEN_ADDR 0xc0
-#define AHD_ANNEXCOL_PER_DEV0 0x04
-#define DST_MODE_SHIFT 0x04
/* Downloaded Constant Definitions */
+#define SG_SIZEOF 0x04
+#define SG_PREFETCH_ALIGN_MASK 0x02
+#define SG_PREFETCH_CNT_LIMIT 0x01
#define CACHELINE_MASK 0x07
#define SCB_TRANSFER_SIZE 0x06
#define PKT_OVERRUN_BUFOFFSET 0x05
-#define SG_SIZEOF 0x04
#define SG_PREFETCH_ADDR_MASK 0x03
-#define SG_PREFETCH_ALIGN_MASK 0x02
-#define SG_PREFETCH_CNT_LIMIT 0x01
#define SG_PREFETCH_CNT 0x00
#define DOWNLOAD_CONST_COUNT 0x08
/* Exported Labels */
-#define LABEL_seq_isr 0x28f
#define LABEL_timer_isr 0x28b
+#define LABEL_seq_isr 0x28f
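
The hunks above regenerate aic79xx_reg.h_shipped: registers that share a mode-dependent address (LQIN/TYPEPTR at 0x20, SSTAT0/CLRSINT0 at 0x4b, SGHADDR/SCBHADDR at 0x7c, and so on) are re-emitted in a new order, a few definitions such as SCB_TARGET_PHASES and SCB_SPARE are dropped, and new bits like BIDICHKDIS, LQOBUSETDLY, and LQONOHOLDLACK appear. The companion aic79xx_reg_print.c_shipped diff below pairs each register with an optional ahd_reg_parse_entry_t table of { name, value, mask } rows that the generated ahd_*_print() helpers hand to ahd_print_register(). A minimal sketch of that table-driven dump pattern follows; print_register() here is a hypothetical stand-in for the driver's ahd_print_register(), not its real implementation (it omits the cur_col/wrap bookkeeping the real signature carries), and the three SSTAT0 rows are copied from the diff below:

	#include <stdio.h>

	/* Mirror of the { name, value, mask } rows in the generated tables. */
	typedef struct {
		const char	*name;
		unsigned char	 value;
		unsigned char	 mask;
	} reg_parse_entry;

	/* Rows copied from the SSTAT0 parse table in the diff below. */
	static const reg_parse_entry sstat0_table[] = {
		{ "ARBDO",   0x01, 0x01 },
		{ "SPIORDY", 0x02, 0x02 },
		{ "OVERRUN", 0x04, 0x04 },
	};

	/*
	 * Hypothetical stand-in for ahd_print_register(): print the
	 * register name, offset, and raw value, then the name of every
	 * field whose masked bits equal the expected value.
	 */
	static void print_register(const reg_parse_entry *table, int num_entries,
				   const char *name, unsigned int address,
				   unsigned char regvalue)
	{
		int i;

		printf("%s[0x%02x] = 0x%02x :", name, address, regvalue);
		for (i = 0; i < num_entries; i++)
			if ((regvalue & table[i].mask) == table[i].value)
				printf(" %s", table[i].name);
		printf("\n");
	}

	int main(void)
	{
		/* SSTAT0 lives at 0x4b in this register map; 0x05 decodes
		 * to ARBDO and OVERRUN. */
		print_register(sstat0_table, 3, "SSTAT0", 0x4b, 0x05);
		return 0;
	}
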
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
index db38a61a8cb..c4c8a96bf5a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -8,7 +8,7 @@
#include "aic79xx_osm.h"
-static ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
+static const ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
{ "SRC_MODE", 0x07, 0x07 },
{ "DST_MODE", 0x70, 0x70 }
};
@@ -20,7 +20,7 @@ ahd_mode_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x00, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
{ "SPLTINT", 0x01, 0x01 },
{ "CMDCMPLT", 0x02, 0x02 },
{ "SEQINT", 0x04, 0x04 },
@@ -39,7 +39,7 @@ ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x01, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
{ "NO_SEQINT", 0x00, 0xff },
{ "BAD_PHASE", 0x01, 0xff },
{ "SEND_REJECT", 0x02, 0xff },
@@ -76,7 +76,7 @@ ahd_seqintcode_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x02, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRINT_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRINT_parse_table[] = {
{ "CLRSPLTINT", 0x01, 0x01 },
{ "CLRCMDINT", 0x02, 0x02 },
{ "CLRSEQINT", 0x04, 0x04 },
@@ -94,7 +94,7 @@ ahd_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x03, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t ERROR_parse_table[] = {
+static const ahd_reg_parse_entry_t ERROR_parse_table[] = {
{ "DSCTMOUT", 0x02, 0x02 },
{ "ILLOPCODE", 0x04, 0x04 },
{ "SQPARERR", 0x08, 0x08 },
@@ -111,24 +111,7 @@ ahd_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x04, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRERR_parse_table[] = {
- { "CLRDSCTMOUT", 0x02, 0x02 },
- { "CLRILLOPCODE", 0x04, 0x04 },
- { "CLRSQPARERR", 0x08, 0x08 },
- { "CLRDPARERR", 0x10, 0x10 },
- { "CLRMPARERR", 0x20, 0x20 },
- { "CLRCIOACCESFAIL", 0x40, 0x40 },
- { "CLRCIOPARERR", 0x80, 0x80 }
-};
-
-int
-ahd_clrerr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRERR_parse_table, 7, "CLRERR",
- 0x04, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
+static const ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
{ "CHIPRST", 0x01, 0x01 },
{ "CHIPRSTACK", 0x01, 0x01 },
{ "INTEN", 0x02, 0x02 },
@@ -160,7 +143,7 @@ ahd_hescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x08, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
+static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
{ "ENINT_COALESCE", 0x40, 0x40 },
{ "HOST_TQINPOS", 0x80, 0x80 }
};
@@ -172,7 +155,7 @@ ahd_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = {
{ "SEQ_SPLTINT", 0x01, 0x01 },
{ "SEQ_PCIINT", 0x02, 0x02 },
{ "SEQ_SCSIINT", 0x04, 0x04 },
@@ -187,7 +170,7 @@ ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0c, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
{ "CLRSEQ_SPLTINT", 0x01, 0x01 },
{ "CLRSEQ_PCIINT", 0x02, 0x02 },
{ "CLRSEQ_SCSIINT", 0x04, 0x04 },
@@ -230,7 +213,7 @@ ahd_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x14, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
+static const ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
{ "SCB_QSIZE_4", 0x00, 0x0f },
{ "SCB_QSIZE_8", 0x01, 0x0f },
{ "SCB_QSIZE_16", 0x02, 0x0f },
@@ -258,7 +241,7 @@ ahd_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x16, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t INTCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t INTCTL_parse_table[] = {
{ "SPLTINTEN", 0x01, 0x01 },
{ "SEQINTEN", 0x02, 0x02 },
{ "SCSIINTEN", 0x04, 0x04 },
@@ -276,7 +259,7 @@ ahd_intctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x18, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DFCNTRL_parse_table[] = {
+static const ahd_reg_parse_entry_t DFCNTRL_parse_table[] = {
{ "DIRECTIONEN", 0x01, 0x01 },
{ "FIFOFLUSH", 0x02, 0x02 },
{ "FIFOFLUSHACK", 0x02, 0x02 },
@@ -297,7 +280,7 @@ ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x19, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
+static const ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
{ "CIOPARCKEN", 0x01, 0x01 },
{ "DISABLE_TWATE", 0x02, 0x02 },
{ "EXTREQLCK", 0x10, 0x10 },
@@ -313,7 +296,7 @@ ahd_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x19, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
+static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
{ "FIFOEMP", 0x01, 0x01 },
{ "FIFOFULL", 0x02, 0x02 },
{ "DFTHRESH", 0x04, 0x04 },
@@ -330,7 +313,7 @@ ahd_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
+static const ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
{ "LAST_SEG_DONE", 0x01, 0x01 },
{ "LAST_SEG", 0x02, 0x02 },
{ "ODD_SEG", 0x04, 0x04 },
@@ -344,20 +327,7 @@ ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t ARBCTL_parse_table[] = {
- { "USE_TIME", 0x07, 0x07 },
- { "RETRY_SWEN", 0x08, 0x08 },
- { "RESET_HARB", 0x80, 0x80 }
-};
-
-int
-ahd_arbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(ARBCTL_parse_table, 3, "ARBCTL",
- 0x1b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
+static const ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
{ "LAST_SEG", 0x02, 0x02 },
{ "ODD_SEG", 0x04, 0x04 },
{ "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -378,20 +348,6 @@ ahd_lqin_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_typeptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "TYPEPTR",
- 0x20, regvalue, cur_col, wrap));
-}
-
-int
-ahd_tagptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "TAGPTR",
- 0x21, regvalue, cur_col, wrap));
-}
-
-int
ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "LUNPTR",
@@ -399,20 +355,6 @@ ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_datalenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DATALENPTR",
- 0x23, regvalue, cur_col, wrap));
-}
-
-int
-ahd_statlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "STATLENPTR",
- 0x24, regvalue, cur_col, wrap));
-}
-
-int
ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "CMDLENPTR",
@@ -448,13 +390,6 @@ ahd_qnextptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_idptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "IDPTR",
- 0x2a, regvalue, cur_col, wrap));
-}
-
-int
ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "ABRTBYTEPTR",
@@ -468,28 +403,7 @@ ahd_abrtbitptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x2c, regvalue, cur_col, wrap));
}
-int
-ahd_maxcmdbytes_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MAXCMDBYTES",
- 0x2d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_maxcmd2rcv_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MAXCMD2RCV",
- 0x2e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_shortthresh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SHORTTHRESH",
- 0x2f, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
+static const ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
{ "ILUNLEN", 0x0f, 0x0f },
{ "TLUNLEN", 0xf0, 0xf0 }
};
@@ -522,49 +436,7 @@ ahd_maxcmdcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x33, regvalue, cur_col, wrap));
}
-int
-ahd_lqrsvd01_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LQRSVD01",
- 0x34, regvalue, cur_col, wrap));
-}
-
-int
-ahd_lqrsvd16_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LQRSVD16",
- 0x35, regvalue, cur_col, wrap));
-}
-
-int
-ahd_lqrsvd17_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LQRSVD17",
- 0x36, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmdrsvd0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CMDRSVD0",
- 0x37, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQCTL0_parse_table[] = {
- { "LQ0INITGCLT", 0x03, 0x03 },
- { "LQ0TARGCLT", 0x0c, 0x0c },
- { "LQIINITGCLT", 0x30, 0x30 },
- { "LQITARGCLT", 0xc0, 0xc0 }
-};
-
-int
-ahd_lqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(LQCTL0_parse_table, 4, "LQCTL0",
- 0x38, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
{ "ABORTPENDING", 0x01, 0x01 },
{ "SINGLECMD", 0x02, 0x02 },
{ "PCI2PCI", 0x04, 0x04 }
@@ -577,23 +449,7 @@ ahd_lqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x38, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSBIST0_parse_table[] = {
- { "OSBISTRUN", 0x01, 0x01 },
- { "OSBISTDONE", 0x02, 0x02 },
- { "OSBISTERR", 0x04, 0x04 },
- { "GSBISTRUN", 0x10, 0x10 },
- { "GSBISTDONE", 0x20, 0x20 },
- { "GSBISTERR", 0x40, 0x40 }
-};
-
-int
-ahd_scsbist0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSBIST0_parse_table, 6, "SCSBIST0",
- 0x39, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
{ "LQOPAUSE", 0x01, 0x01 },
{ "LQOTOIDLE", 0x02, 0x02 },
{ "LQOCONTINUE", 0x04, 0x04 },
@@ -611,20 +467,7 @@ ahd_lqctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x39, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSBIST1_parse_table[] = {
- { "NTBISTRUN", 0x01, 0x01 },
- { "NTBISTDONE", 0x02, 0x02 },
- { "NTBISTERR", 0x04, 0x04 }
-};
-
-int
-ahd_scsbist1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSBIST1_parse_table, 3, "SCSBIST1",
- 0x3a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
{ "SCSIRSTO", 0x01, 0x01 },
{ "FORCEBUSFREE", 0x10, 0x10 },
{ "ENARBO", 0x20, 0x20 },
@@ -639,7 +482,7 @@ ahd_scsiseq0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
{ "ALTSTIM", 0x01, 0x01 },
{ "ENAUTOATNP", 0x02, 0x02 },
{ "MANUALP", 0x0c, 0x0c },
@@ -655,7 +498,7 @@ ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
+static const ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
{ "SPIOEN", 0x08, 0x08 },
{ "BIOSCANCELEN", 0x10, 0x10 },
{ "DFPEXP", 0x40, 0x40 },
@@ -669,21 +512,7 @@ ahd_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3c, regvalue, cur_col, wrap));
}
-int
-ahd_dlcount_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DLCOUNT",
- 0x3c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_businitid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "BUSINITID",
- 0x3c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
+static const ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
{ "STPWEN", 0x01, 0x01 },
{ "ACTNEGEN", 0x02, 0x02 },
{ "ENSTIMER", 0x04, 0x04 },
@@ -700,27 +529,7 @@ ahd_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3d, regvalue, cur_col, wrap));
}
-int
-ahd_bustargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "BUSTARGID",
- 0x3e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SXFRCTL2_parse_table[] = {
- { "ASU", 0x07, 0x07 },
- { "CMDDMAEN", 0x08, 0x08 },
- { "AUTORSTDIS", 0x10, 0x10 }
-};
-
-int
-ahd_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
- 0x3e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
{ "CURRFIFO_0", 0x00, 0x03 },
{ "CURRFIFO_1", 0x01, 0x03 },
{ "CURRFIFO_NONE", 0x03, 0x03 },
@@ -736,7 +545,14 @@ ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3f, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
+int
+ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "MULTARGID",
+ 0x40, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
{ "P_DATAOUT", 0x00, 0xe0 },
{ "P_DATAOUT_DT", 0x20, 0xe0 },
{ "P_DATAIN", 0x40, 0xe0 },
@@ -763,14 +579,7 @@ ahd_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x40, regvalue, cur_col, wrap));
}
-int
-ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MULTARGID",
- 0x40, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
{ "P_DATAOUT", 0x00, 0xe0 },
{ "P_DATAOUT_DT", 0x20, 0xe0 },
{ "P_DATAIN", 0x40, 0xe0 },
@@ -797,7 +606,7 @@ ahd_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x41, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
{ "DATA_OUT_PHASE", 0x01, 0x03 },
{ "DATA_IN_PHASE", 0x02, 0x03 },
{ "DATA_PHASE_MASK", 0x03, 0x03 },
@@ -815,13 +624,6 @@ ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_scsidat0_img_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCSIDAT0_IMG",
- 0x43, regvalue, cur_col, wrap));
-}
-
-int
ahd_scsidat_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SCSIDAT",
@@ -835,7 +637,7 @@ ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x46, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
+static const ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
{ "TARGID", 0x0f, 0x0f },
{ "CLKOUT", 0x80, 0x80 }
};
@@ -847,7 +649,7 @@ ahd_targidin_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x48, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SELID_parse_table[] = {
+static const ahd_reg_parse_entry_t SELID_parse_table[] = {
{ "ONEBIT", 0x08, 0x08 },
{ "SELID_MASK", 0xf0, 0xf0 }
};
@@ -859,7 +661,7 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x49, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
+static const ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
{ "AUTO_MSGOUT_DE", 0x02, 0x02 },
{ "ENDGFORMCHK", 0x04, 0x04 },
{ "BUSFREEREV", 0x10, 0x10 },
@@ -876,7 +678,7 @@ ahd_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
{ "SELWIDE", 0x02, 0x02 },
{ "ENAB20", 0x04, 0x04 },
{ "ENAB40", 0x08, 0x08 },
@@ -891,24 +693,7 @@ ahd_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
- { "CLRARBDO", 0x01, 0x01 },
- { "CLRSPIORDY", 0x02, 0x02 },
- { "CLROVERRUN", 0x04, 0x04 },
- { "CLRIOERR", 0x08, 0x08 },
- { "CLRSELINGO", 0x10, 0x10 },
- { "CLRSELDI", 0x20, 0x20 },
- { "CLRSELDO", 0x40, 0x40 }
-};
-
-int
-ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
- 0x4b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
{ "ARBDO", 0x01, 0x01 },
{ "SPIORDY", 0x02, 0x02 },
{ "OVERRUN", 0x04, 0x04 },
@@ -926,7 +711,7 @@ ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
{ "ENARBDO", 0x01, 0x01 },
{ "ENSPIORDY", 0x02, 0x02 },
{ "ENOVERRUN", 0x04, 0x04 },
@@ -943,24 +728,24 @@ ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
- { "CLRREQINIT", 0x01, 0x01 },
- { "CLRSTRB2FAST", 0x02, 0x02 },
- { "CLRSCSIPERR", 0x04, 0x04 },
- { "CLRBUSFREE", 0x08, 0x08 },
- { "CLRSCSIRSTI", 0x20, 0x20 },
- { "CLRATNO", 0x40, 0x40 },
- { "CLRSELTIMEO", 0x80, 0x80 }
+static const ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
+ { "CLRARBDO", 0x01, 0x01 },
+ { "CLRSPIORDY", 0x02, 0x02 },
+ { "CLROVERRUN", 0x04, 0x04 },
+ { "CLRIOERR", 0x08, 0x08 },
+ { "CLRSELINGO", 0x10, 0x10 },
+ { "CLRSELDI", 0x20, 0x20 },
+ { "CLRSELDO", 0x40, 0x40 }
};
int
-ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
- 0x4c, regvalue, cur_col, wrap));
+ return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
+ 0x4b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
{ "REQINIT", 0x01, 0x01 },
{ "STRB2FAST", 0x02, 0x02 },
{ "SCSIPERR", 0x04, 0x04 },
@@ -978,7 +763,24 @@ ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4c, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
+ { "CLRREQINIT", 0x01, 0x01 },
+ { "CLRSTRB2FAST", 0x02, 0x02 },
+ { "CLRSCSIPERR", 0x04, 0x04 },
+ { "CLRBUSFREE", 0x08, 0x08 },
+ { "CLRSCSIRSTI", 0x20, 0x20 },
+ { "CLRATNO", 0x40, 0x40 },
+ { "CLRSELTIMEO", 0x80, 0x80 }
+};
+
+int
+ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
+ 0x4c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
{ "BUSFREE_LQO", 0x40, 0xc0 },
{ "BUSFREE_DFF0", 0x80, 0xc0 },
{ "BUSFREE_DFF1", 0xc0, 0xc0 },
@@ -998,20 +800,7 @@ ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4d, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SIMODE2_parse_table[] = {
- { "ENDMADONE", 0x01, 0x01 },
- { "ENSDONE", 0x02, 0x02 },
- { "ENWIDE_RES", 0x04, 0x04 }
-};
-
-int
-ahd_simode2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SIMODE2_parse_table, 3, "SIMODE2",
- 0x4d, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
{ "CLRDMADONE", 0x01, 0x01 },
{ "CLRSDONE", 0x02, 0x02 },
{ "CLRWIDE_RES", 0x04, 0x04 },
@@ -1025,7 +814,7 @@ ahd_clrsint2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4d, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
+static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
{ "DTERR", 0x01, 0x01 },
{ "DGFORMERR", 0x02, 0x02 },
{ "CRCERR", 0x04, 0x04 },
@@ -1064,7 +853,7 @@ ahd_lqostate_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4f, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
{ "LQIATNCMD", 0x01, 0x01 },
{ "LQIATNLQ", 0x02, 0x02 },
{ "LQIBADLQT", 0x04, 0x04 },
@@ -1080,23 +869,7 @@ ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x50, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
- { "CLRLQIATNCMD", 0x01, 0x01 },
- { "CLRLQIATNLQ", 0x02, 0x02 },
- { "CLRLQIBADLQT", 0x04, 0x04 },
- { "CLRLQICRCT2", 0x08, 0x08 },
- { "CLRLQICRCT1", 0x10, 0x10 },
- { "CLRLQIATNQAS", 0x20, 0x20 }
-};
-
-int
-ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
- 0x50, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
{ "ENLQIATNCMD", 0x01, 0x01 },
{ "ENLQIATNLQ", 0x02, 0x02 },
{ "ENLQIBADLQT", 0x04, 0x04 },
@@ -1112,7 +885,23 @@ ahd_lqimode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x50, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
+ { "CLRLQIATNCMD", 0x01, 0x01 },
+ { "CLRLQIATNLQ", 0x02, 0x02 },
+ { "CLRLQIBADLQT", 0x04, 0x04 },
+ { "CLRLQICRCT2", 0x08, 0x08 },
+ { "CLRLQICRCT1", 0x10, 0x10 },
+ { "CLRLQIATNQAS", 0x20, 0x20 }
+};
+
+int
+ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
+ 0x50, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
{ "ENLQIOVERI_NLQ", 0x01, 0x01 },
{ "ENLQIOVERI_LQ", 0x02, 0x02 },
{ "ENLQIBADLQI", 0x04, 0x04 },
@@ -1130,7 +919,7 @@ ahd_lqimode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x51, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
{ "LQIOVERI_NLQ", 0x01, 0x01 },
{ "LQIOVERI_LQ", 0x02, 0x02 },
{ "LQIBADLQI", 0x04, 0x04 },
@@ -1148,7 +937,7 @@ ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x51, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
{ "CLRLQIOVERI_NLQ", 0x01, 0x01 },
{ "CLRLQIOVERI_LQ", 0x02, 0x02 },
{ "CLRLQIBADLQI", 0x04, 0x04 },
@@ -1166,7 +955,7 @@ ahd_clrlqiint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x51, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
{ "LQIGSAVAIL", 0x01, 0x01 },
{ "LQISTOPCMD", 0x02, 0x02 },
{ "LQISTOPLQ", 0x04, 0x04 },
@@ -1184,7 +973,7 @@ ahd_lqistat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x52, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
{ "OSRAMPERR", 0x01, 0x01 },
{ "NTRAMPERR", 0x02, 0x02 }
};
@@ -1196,7 +985,7 @@ ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x53, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
{ "ENOSRAMPERR", 0x01, 0x01 },
{ "ENNTRAMPERR", 0x02, 0x02 }
};
@@ -1208,7 +997,7 @@ ahd_simode3_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x53, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
{ "CLROSRAMPERR", 0x01, 0x01 },
{ "CLRNTRAMPERR", 0x02, 0x02 }
};
@@ -1220,7 +1009,7 @@ ahd_clrsint3_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x53, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
{ "LQOTCRC", 0x01, 0x01 },
{ "LQOATNPKT", 0x02, 0x02 },
{ "LQOATNLQ", 0x04, 0x04 },
@@ -1235,7 +1024,7 @@ ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x54, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
{ "CLRLQOTCRC", 0x01, 0x01 },
{ "CLRLQOATNPKT", 0x02, 0x02 },
{ "CLRLQOATNLQ", 0x04, 0x04 },
@@ -1250,7 +1039,7 @@ ahd_clrlqoint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x54, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
{ "ENLQOTCRC", 0x01, 0x01 },
{ "ENLQOATNPKT", 0x02, 0x02 },
{ "ENLQOATNLQ", 0x04, 0x04 },
@@ -1265,7 +1054,7 @@ ahd_lqomode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x54, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
{ "ENLQOPHACHGINPKT", 0x01, 0x01 },
{ "ENLQOBUSFREE", 0x02, 0x02 },
{ "ENLQOBADQAS", 0x04, 0x04 },
@@ -1280,7 +1069,7 @@ ahd_lqomode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x55, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
{ "LQOPHACHGINPKT", 0x01, 0x01 },
{ "LQOBUSFREE", 0x02, 0x02 },
{ "LQOBADQAS", 0x04, 0x04 },
@@ -1295,7 +1084,7 @@ ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x55, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
{ "CLRLQOPHACHGINPKT", 0x01, 0x01 },
{ "CLRLQOBUSFREE", 0x02, 0x02 },
{ "CLRLQOBADQAS", 0x04, 0x04 },
@@ -1310,7 +1099,7 @@ ahd_clrlqoint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x55, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
{ "LQOSTOP0", 0x01, 0x01 },
{ "LQOPHACHGOUTPKT", 0x02, 0x02 },
{ "LQOWAITFIFO", 0x10, 0x10 },
@@ -1331,7 +1120,7 @@ ahd_os_space_cnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x56, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
{ "ENREQINIT", 0x01, 0x01 },
{ "ENSTRB2FAST", 0x02, 0x02 },
{ "ENSCSIPERR", 0x04, 0x04 },
@@ -1356,7 +1145,7 @@ ahd_gsfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x58, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
{ "RSTCHN", 0x01, 0x01 },
{ "CLRCHN", 0x02, 0x02 },
{ "CLRSHCNT", 0x04, 0x04 },
@@ -1370,15 +1159,17 @@ ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
{ "LQONOCHKOVER", 0x01, 0x01 },
+ { "LQONOHOLDLACK", 0x02, 0x02 },
+ { "LQOBUSETDLY", 0x40, 0x40 },
{ "LQOH2A_VERSION", 0x80, 0x80 }
};
int
ahd_lqoscsctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(LQOSCSCTL_parse_table, 2, "LQOSCSCTL",
+ return (ahd_print_register(LQOSCSCTL_parse_table, 4, "LQOSCSCTL",
0x5a, regvalue, cur_col, wrap));
}
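
A detail worth noting in the LQOSCSCTL hunk just above: because these files are generated, the row count reaches ahd_print_register() as a literal, so the table and the second argument must move in lockstep — here the table gained LQONOHOLDLACK and LQOBUSETDLY and the count changed from 2 to 4 with it, and the SCSCHKN hunk further down makes the same paired 7-to-8 change. Hand-maintained C would normally derive the count from the array so the two cannot drift; a sketch, assuming only standard C (the kernel's own equivalent of this macro is ARRAY_SIZE()):

	/* Hypothetical ARRAY_LEN macro: row count derived from the table. */
	#define ARRAY_LEN(tbl)	(sizeof(tbl) / sizeof((tbl)[0]))

	return (ahd_print_register(LQOSCSCTL_parse_table,
				   ARRAY_LEN(LQOSCSCTL_parse_table),
				   "LQOSCSCTL", 0x5a, regvalue, cur_col, wrap));
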
@@ -1389,7 +1180,7 @@ ahd_nextscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
{ "CLRCFG4TCMD", 0x01, 0x01 },
{ "CLRCFG4ICMD", 0x02, 0x02 },
{ "CLRCFG4TSTAT", 0x04, 0x04 },
@@ -1406,7 +1197,7 @@ ahd_clrseqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
{ "CFG4TCMD", 0x01, 0x01 },
{ "CFG4ICMD", 0x02, 0x02 },
{ "CFG4TSTAT", 0x04, 0x04 },
@@ -1423,14 +1214,7 @@ ahd_seqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5b, regvalue, cur_col, wrap));
}
-int
-ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CURRSCB",
- 0x5c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
{ "ENCFG4TCMD", 0x01, 0x01 },
{ "ENCFG4ICMD", 0x02, 0x02 },
{ "ENCFG4TSTAT", 0x04, 0x04 },
@@ -1447,7 +1231,14 @@ ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5c, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
+int
+ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+ return (ahd_print_register(NULL, 0, "CURRSCB",
+ 0x5c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
{ "FIFOFREE", 0x01, 0x01 },
{ "DATAINFIFO", 0x02, 0x02 },
{ "DLZERO", 0x04, 0x04 },
@@ -1464,24 +1255,6 @@ ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5d, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CRCCONTROL_parse_table[] = {
- { "CRCVALCHKEN", 0x40, 0x40 }
-};
-
-int
-ahd_crccontrol_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CRCCONTROL_parse_table, 1, "CRCCONTROL",
- 0x5d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfftag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFFTAG",
- 0x5e, regvalue, cur_col, wrap));
-}
-
int
ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1489,31 +1262,6 @@ ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5e, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSITEST_parse_table[] = {
- { "SEL_TXPLL_DEBUG", 0x04, 0x04 },
- { "CNTRTEST", 0x08, 0x08 }
-};
-
-int
-ahd_scsitest_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSITEST_parse_table, 2, "SCSITEST",
- 0x5e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t IOPDNCTL_parse_table[] = {
- { "PDN_DIFFSENSE", 0x01, 0x01 },
- { "PDN_IDIST", 0x04, 0x04 },
- { "DISABLE_OE", 0x80, 0x80 }
-};
-
-int
-ahd_iopdnctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(IOPDNCTL_parse_table, 3, "IOPDNCTL",
- 0x5f, regvalue, cur_col, wrap));
-}
-
int
ahd_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1529,13 +1277,6 @@ ahd_negoaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_dgrpcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DGRPCRCI",
- 0x60, regvalue, cur_col, wrap));
-}
-
-int
ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "NEGPERIOD",
@@ -1543,20 +1284,13 @@ ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_packcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "PACKCRCI",
- 0x62, regvalue, cur_col, wrap));
-}
-
-int
ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "NEGOFFSET",
0x62, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
+static const ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
{ "PPROPT_IUT", 0x01, 0x01 },
{ "PPROPT_DT", 0x02, 0x02 },
{ "PPROPT_QAS", 0x04, 0x04 },
@@ -1570,7 +1304,7 @@ ahd_negppropts_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x63, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
+static const ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
{ "WIDEXFER", 0x01, 0x01 },
{ "ENAUTOATNO", 0x02, 0x02 },
{ "ENAUTOATNI", 0x04, 0x04 },
@@ -1601,20 +1335,21 @@ ahd_annexdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x66, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
{ "LSTSGCLRDIS", 0x01, 0x01 },
{ "SHVALIDSTDIS", 0x02, 0x02 },
{ "DFFACTCLR", 0x04, 0x04 },
{ "SDONEMSKDIS", 0x08, 0x08 },
{ "WIDERESEN", 0x10, 0x10 },
{ "CURRFIFODEF", 0x20, 0x20 },
- { "STSELSKIDDIS", 0x40, 0x40 }
+ { "STSELSKIDDIS", 0x40, 0x40 },
+ { "BIDICHKDIS", 0x80, 0x80 }
};
int
ahd_scschkn_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(SCSCHKN_parse_table, 7, "SCSCHKN",
+ return (ahd_print_register(SCSCHKN_parse_table, 8, "SCSCHKN",
0x66, regvalue, cur_col, wrap));
}
@@ -1625,23 +1360,6 @@ ahd_iownid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x67, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t PLL960CTL0_parse_table[] = {
- { "PLL_ENFBM", 0x01, 0x01 },
- { "PLL_DLPF", 0x02, 0x02 },
- { "PLL_ENLPF", 0x04, 0x04 },
- { "PLL_ENLUD", 0x08, 0x08 },
- { "PLL_NS", 0x30, 0x30 },
- { "PLL_PWDN", 0x40, 0x40 },
- { "PLL_VCOSEL", 0x80, 0x80 }
-};
-
-int
-ahd_pll960ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL960CTL0_parse_table, 7, "PLL960CTL0",
- 0x68, regvalue, cur_col, wrap));
-}
-
int
ahd_shcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1656,33 +1374,6 @@ ahd_townid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x69, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t PLL960CTL1_parse_table[] = {
- { "PLL_RST", 0x01, 0x01 },
- { "PLL_CNTCLR", 0x40, 0x40 },
- { "PLL_CNTEN", 0x80, 0x80 }
-};
-
-int
-ahd_pll960ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL960CTL1_parse_table, 3, "PLL960CTL1",
- 0x69, regvalue, cur_col, wrap));
-}
-
-int
-ahd_pll960cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "PLL960CNT0",
- 0x6a, regvalue, cur_col, wrap));
-}
-
-int
-ahd_xsig_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "XSIG",
- 0x6a, regvalue, cur_col, wrap));
-}
-
int
ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1690,57 +1381,6 @@ ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x6b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t PLL400CTL0_parse_table[] = {
- { "PLL_ENFBM", 0x01, 0x01 },
- { "PLL_DLPF", 0x02, 0x02 },
- { "PLL_ENLPF", 0x04, 0x04 },
- { "PLL_ENLUD", 0x08, 0x08 },
- { "PLL_NS", 0x30, 0x30 },
- { "PLL_PWDN", 0x40, 0x40 },
- { "PLL_VCOSEL", 0x80, 0x80 }
-};
-
-int
-ahd_pll400ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL400CTL0_parse_table, 7, "PLL400CTL0",
- 0x6c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_fairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FAIRNESS",
- 0x6c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t PLL400CTL1_parse_table[] = {
- { "PLL_RST", 0x01, 0x01 },
- { "PLL_CNTCLR", 0x40, 0x40 },
- { "PLL_CNTEN", 0x80, 0x80 }
-};
-
-int
-ahd_pll400ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLL400CTL1_parse_table, 3, "PLL400CTL1",
- 0x6d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_unfairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "UNFAIRNESS",
- 0x6e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_pll400cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "PLL400CNT0",
- 0x6e, regvalue, cur_col, wrap));
-}
-
int
ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1748,31 +1388,6 @@ ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x70, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t PLLDELAY_parse_table[] = {
- { "SPLIT_DROP_REQ", 0x80, 0x80 }
-};
-
-int
-ahd_plldelay_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PLLDELAY_parse_table, 1, "PLLDELAY",
- 0x70, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hodmaadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "HODMAADR",
- 0x70, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hodmacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "HODMACNT",
- 0x78, regvalue, cur_col, wrap));
-}
-
int
ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1781,10 +1396,10 @@ ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_hodmaen_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "HODMAEN",
- 0x7a, regvalue, cur_col, wrap));
+ return (ahd_print_register(NULL, 0, "SGHADDR",
+ 0x7c, regvalue, cur_col, wrap));
}
int
@@ -1795,10 +1410,10 @@ ahd_scbhaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "SGHADDR",
- 0x7c, regvalue, cur_col, wrap));
+ return (ahd_print_register(NULL, 0, "SGHCNT",
+ 0x84, regvalue, cur_col, wrap));
}
int
@@ -1808,14 +1423,7 @@ ahd_scbhcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x84, regvalue, cur_col, wrap));
}
-int
-ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SGHCNT",
- 0x84, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
+static const ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
{ "WR_DFTHRSH_MIN", 0x00, 0x70 },
{ "RD_DFTHRSH_MIN", 0x00, 0x07 },
{ "RD_DFTHRSH_25", 0x01, 0x07 },
@@ -1843,209 +1451,7 @@ ahd_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x88, regvalue, cur_col, wrap));
}
-int
-ahd_romaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ROMADDR",
- 0x8a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t ROMCNTRL_parse_table[] = {
- { "RDY", 0x01, 0x01 },
- { "REPEAT", 0x02, 0x02 },
- { "ROMSPD", 0x18, 0x18 },
- { "ROMOP", 0xe0, 0xe0 }
-};
-
-int
-ahd_romcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(ROMCNTRL_parse_table, 4, "ROMCNTRL",
- 0x8d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_romdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ROMDATA",
- 0x8e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_cmcrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG0_parse_table, 2, "CMCRXMSG0",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t ROENABLE_parse_table[] = {
- { "DCH0ROEN", 0x01, 0x01 },
- { "DCH1ROEN", 0x02, 0x02 },
- { "SGROEN", 0x04, 0x04 },
- { "CMCROEN", 0x08, 0x08 },
- { "OVLYROEN", 0x10, 0x10 },
- { "MSIROEN", 0x20, 0x20 }
-};
-
-int
-ahd_roenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(ROENABLE_parse_table, 6, "ROENABLE",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_ovlyrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG0_parse_table, 2, "OVLYRXMSG0",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_dchrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG0_parse_table, 2, "DCHRXMSG0",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_ovlyrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG1_parse_table, 1, "OVLYRXMSG1",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t NSENABLE_parse_table[] = {
- { "DCH0NSEN", 0x01, 0x01 },
- { "DCH1NSEN", 0x02, 0x02 },
- { "SGNSEN", 0x04, 0x04 },
- { "CMCNSEN", 0x08, 0x08 },
- { "OVLYNSEN", 0x10, 0x10 },
- { "MSINSEN", 0x20, 0x20 }
-};
-
-int
-ahd_nsenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NSENABLE_parse_table, 6, "NSENABLE",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_cmcrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG1_parse_table, 1, "CMCRXMSG1",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_dchrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG1_parse_table, 1, "DCHRXMSG1",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_dchrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG2_parse_table, 1, "DCHRXMSG2",
- 0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_cmcrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG2_parse_table, 1, "CMCRXMSG2",
- 0x92, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ost_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "OST",
- 0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_ovlyrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG2_parse_table, 1, "OVLYRXMSG2",
- 0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_dchrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHRXMSG3_parse_table, 1, "DCHRXMSG3",
- 0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_ovlyrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYRXMSG3_parse_table, 1, "OVLYRXMSG3",
- 0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_cmcrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCRXMSG3_parse_table, 1, "CMCRXMSG3",
- 0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
{ "CMPABCDIS", 0x01, 0x01 },
{ "TSCSERREN", 0x02, 0x02 },
{ "SRSPDPEEN", 0x04, 0x04 },
@@ -2062,46 +1468,7 @@ ahd_pcixctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x93, regvalue, cur_col, wrap));
}
-int
-ahd_ovlyseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "OVLYSEQBCNT",
- 0x94, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dchseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DCHSEQBCNT",
- 0x94, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmcseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CMCSEQBCNT",
- 0x94, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCSPLTSTAT0_parse_table[] = {
- { "RXSPLTRSP", 0x01, 0x01 },
- { "RXSCEMSG", 0x02, 0x02 },
- { "RXOVRUN", 0x04, 0x04 },
- { "CNTNOTCMPLT", 0x08, 0x08 },
- { "SCDATBUCKET", 0x10, 0x10 },
- { "SCADERR", 0x20, 0x20 },
- { "SCBCERR", 0x40, 0x40 },
- { "STAETERM", 0x80, 0x80 }
-};
-
-int
-ahd_cmcspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCSPLTSTAT0_parse_table, 8, "CMCSPLTSTAT0",
- 0x96, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
{ "RXSPLTRSP", 0x01, 0x01 },
{ "RXSCEMSG", 0x02, 0x02 },
{ "RXOVRUN", 0x04, 0x04 },
@@ -2119,47 +1486,7 @@ ahd_dchspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x96, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t OVLYSPLTSTAT0_parse_table[] = {
- { "RXSPLTRSP", 0x01, 0x01 },
- { "RXSCEMSG", 0x02, 0x02 },
- { "RXOVRUN", 0x04, 0x04 },
- { "CNTNOTCMPLT", 0x08, 0x08 },
- { "SCDATBUCKET", 0x10, 0x10 },
- { "SCADERR", 0x20, 0x20 },
- { "SCBCERR", 0x40, 0x40 },
- { "STAETERM", 0x80, 0x80 }
-};
-
-int
-ahd_ovlyspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYSPLTSTAT0_parse_table, 8, "OVLYSPLTSTAT0",
- 0x96, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCSPLTSTAT1_parse_table[] = {
- { "RXDATABUCKET", 0x01, 0x01 }
-};
-
-int
-ahd_cmcspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCSPLTSTAT1_parse_table, 1, "CMCSPLTSTAT1",
- 0x97, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYSPLTSTAT1_parse_table[] = {
- { "RXDATABUCKET", 0x01, 0x01 }
-};
-
-int
-ahd_ovlyspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYSPLTSTAT1_parse_table, 1, "OVLYSPLTSTAT1",
- 0x97, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
{ "RXDATABUCKET", 0x01, 0x01 }
};
@@ -2170,139 +1497,7 @@ ahd_dchspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x97, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SGRXMSG0_parse_table[] = {
- { "CFNUM", 0x07, 0x07 },
- { "CDNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_sgrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG0_parse_table, 2, "SGRXMSG0",
- 0x98, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR0_parse_table[] = {
- { "LOWER_ADDR", 0x7f, 0x7f }
-};
-
-int
-ahd_slvspltoutadr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR0_parse_table, 1, "SLVSPLTOUTADR0",
- 0x98, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG1_parse_table[] = {
- { "CBNUM", 0xff, 0xff }
-};
-
-int
-ahd_sgrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG1_parse_table, 1, "SGRXMSG1",
- 0x99, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR1_parse_table[] = {
- { "REQ_FNUM", 0x07, 0x07 },
- { "REQ_DNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_slvspltoutadr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR1_parse_table, 2, "SLVSPLTOUTADR1",
- 0x99, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG2_parse_table[] = {
- { "MINDEX", 0xff, 0xff }
-};
-
-int
-ahd_sgrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG2_parse_table, 1, "SGRXMSG2",
- 0x9a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR2_parse_table[] = {
- { "REQ_BNUM", 0xff, 0xff }
-};
-
-int
-ahd_slvspltoutadr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR2_parse_table, 1, "SLVSPLTOUTADR2",
- 0x9a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG3_parse_table[] = {
- { "MCLASS", 0x0f, 0x0f }
-};
-
-int
-ahd_sgrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGRXMSG3_parse_table, 1, "SGRXMSG3",
- 0x9b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR3_parse_table[] = {
- { "RLXORD", 0x10, 0x10 },
- { "TAG_NUM", 0x1f, 0x1f }
-};
-
-int
-ahd_slvspltoutadr3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTADR3_parse_table, 2, "SLVSPLTOUTADR3",
- 0x9b, regvalue, cur_col, wrap));
-}
-
-int
-ahd_sgseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SGSEQBCNT",
- 0x9c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR0_parse_table[] = {
- { "LOWER_BCNT", 0xff, 0xff }
-};
-
-int
-ahd_slvspltoutattr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTATTR0_parse_table, 1, "SLVSPLTOUTATTR0",
- 0x9c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR1_parse_table[] = {
- { "CMPLT_FNUM", 0x07, 0x07 },
- { "CMPLT_DNUM", 0xf8, 0xf8 }
-};
-
-int
-ahd_slvspltoutattr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTATTR1_parse_table, 2, "SLVSPLTOUTATTR1",
- 0x9d, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR2_parse_table[] = {
- { "CMPLT_BNUM", 0xff, 0xff }
-};
-
-int
-ahd_slvspltoutattr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SLVSPLTOUTATTR2_parse_table, 1, "SLVSPLTOUTATTR2",
- 0x9e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
{ "RXSPLTRSP", 0x01, 0x01 },
{ "RXSCEMSG", 0x02, 0x02 },
{ "RXOVRUN", 0x04, 0x04 },
@@ -2320,7 +1515,7 @@ ahd_sgspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x9e, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
{ "RXDATABUCKET", 0x01, 0x01 }
};
@@ -2331,19 +1526,7 @@ ahd_sgspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x9f, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SFUNCT_parse_table[] = {
- { "TEST_NUM", 0x0f, 0x0f },
- { "TEST_GROUP", 0xf0, 0xf0 }
-};
-
-int
-ahd_sfunct_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SFUNCT_parse_table, 2, "SFUNCT",
- 0x9f, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
{ "DPR", 0x01, 0x01 },
{ "TWATERR", 0x02, 0x02 },
{ "RDPERR", 0x04, 0x04 },
@@ -2368,83 +1551,6 @@ ahd_reg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xa0, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DF1PCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "TWATERR", 0x02, 0x02 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_df1pcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DF1PCISTAT_parse_table, 8, "DF1PCISTAT",
- 0xa1, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_sgpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGPCISTAT_parse_table, 7, "SGPCISTAT",
- 0xa2, regvalue, cur_col, wrap));
-}
-
-int
-ahd_reg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "REG1",
- 0xa2, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "TWATERR", 0x02, 0x02 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_cmcpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMCPCISTAT_parse_table, 8, "CMCPCISTAT",
- 0xa3, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_ovlypcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OVLYPCISTAT_parse_table, 7, "OVLYPCISTAT",
- 0xa4, regvalue, cur_col, wrap));
-}
-
int
ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -2452,7 +1558,7 @@ ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xa4, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
+static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
{ "SEGS_AVAIL", 0x01, 0x01 },
{ "LOADING_NEEDED", 0x02, 0x02 },
{ "FETCH_INPROG", 0x04, 0x04 }
@@ -2465,23 +1571,7 @@ ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xa6, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t MSIPCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "TWATERR", 0x02, 0x02 },
- { "CLRPENDMSI", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 }
-};
-
-int
-ahd_msipcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(MSIPCISTAT_parse_table, 6, "MSIPCISTAT",
- 0xa6, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
{ "TWATERR", 0x02, 0x02 },
{ "STA", 0x08, 0x08 },
{ "SSE", 0x40, 0x40 },
@@ -2496,27 +1586,13 @@ ahd_targpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_data_count_odd_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DATA_COUNT_ODD",
- 0xa7, regvalue, cur_col, wrap));
-}
-
-int
ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SCBPTR",
0xa8, regvalue, cur_col, wrap));
}
-int
-ahd_ccscbacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CCSCBACNT",
- 0xab, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
{ "SCBPTR_OFF", 0x07, 0x07 },
{ "SCBPTR_ADDR", 0x38, 0x38 },
{ "AUSCBPTR_EN", 0x80, 0x80 }
@@ -2537,36 +1613,13 @@ ahd_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_ccscbadr_bk_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CCSCBADR_BK",
- 0xac, regvalue, cur_col, wrap));
-}
-
-int
ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "CCSCBADDR",
0xac, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CMC_RAMBIST_parse_table[] = {
- { "CMC_BUFFER_BIST_EN", 0x01, 0x01 },
- { "CMC_BUFFER_BIST_FAIL",0x02, 0x02 },
- { "SG_BIST_EN", 0x10, 0x10 },
- { "SG_BIST_FAIL", 0x20, 0x20 },
- { "SCBRAMBIST_FAIL", 0x40, 0x40 },
- { "SG_ELEMENT_SIZE", 0x80, 0x80 }
-};
-
-int
-ahd_cmc_rambist_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CMC_RAMBIST_parse_table, 6, "CMC_RAMBIST",
- 0xad, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
{ "CCSCBRESET", 0x01, 0x01 },
{ "CCSCBDIR", 0x04, 0x04 },
{ "CCSCBEN", 0x08, 0x08 },
@@ -2582,7 +1635,7 @@ ahd_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xad, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
{ "CCSGRESET", 0x01, 0x01 },
{ "SG_FETCH_REQ", 0x02, 0x02 },
{ "CCSGENACK", 0x08, 0x08 },
@@ -2606,13 +1659,6 @@ ahd_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_flexadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FLEXADR",
- 0xb0, regvalue, cur_col, wrap));
-}
-
-int
ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "CCSCBRAM",
@@ -2620,39 +1666,13 @@ ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_flexcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FLEXCNT",
- 0xb3, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t FLEXDMASTAT_parse_table[] = {
- { "FLEXDMADONE", 0x01, 0x01 },
- { "FLEXDMAERR", 0x02, 0x02 }
-};
-
-int
-ahd_flexdmastat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(FLEXDMASTAT_parse_table, 2, "FLEXDMASTAT",
- 0xb5, regvalue, cur_col, wrap));
-}
-
-int
-ahd_flexdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FLEXDATA",
- 0xb6, regvalue, cur_col, wrap));
-}
-
-int
ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "BRDDAT",
0xb8, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
{ "BRDSTB", 0x01, 0x01 },
{ "BRDRW", 0x02, 0x02 },
{ "BRDEN", 0x04, 0x04 },
@@ -2682,7 +1702,7 @@ ahd_seedat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xbc, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEECTL_parse_table[] = {
+static const ahd_reg_parse_entry_t SEECTL_parse_table[] = {
{ "SEEOP_ERAL", 0x40, 0x70 },
{ "SEEOP_WRITE", 0x50, 0x70 },
{ "SEEOP_READ", 0x60, 0x70 },
@@ -2702,7 +1722,7 @@ ahd_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xbe, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
{ "SEESTART", 0x01, 0x01 },
{ "SEEBUSY", 0x02, 0x02 },
{ "SEEARBACK", 0x04, 0x04 },
@@ -2718,34 +1738,7 @@ ahd_seestat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xbe, regvalue, cur_col, wrap));
}
-int
-ahd_scbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCBCNT",
- 0xbf, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFWADDR",
- 0xc0, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPFLTRCTL_parse_table[] = {
- { "DSPFCNTSEL", 0x0f, 0x0f },
- { "EDGESENSE", 0x10, 0x10 },
- { "FLTRDISABLE", 0x20, 0x20 }
-};
-
-int
-ahd_dspfltrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DSPFLTRCTL_parse_table, 3, "DSPFLTRCTL",
- 0xc0, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
+static const ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
{ "XMITOFFSTDIS", 0x02, 0x02 },
{ "RCVROFFSTDIS", 0x04, 0x04 },
{ "DESQDIS", 0x10, 0x10 },
@@ -2760,44 +1753,13 @@ ahd_dspdatactl_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFRADDR",
- 0xc2, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPREQCTL_parse_table[] = {
- { "MANREQDLY", 0x3f, 0x3f },
- { "MANREQCTL", 0xc0, 0xc0 }
-};
-
-int
-ahd_dspreqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DSPREQCTL_parse_table, 2, "DSPREQCTL",
- 0xc2, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPACKCTL_parse_table[] = {
- { "MANACKDLY", 0x3f, 0x3f },
- { "MANACKCTL", 0xc0, 0xc0 }
-};
-
-int
-ahd_dspackctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DSPACKCTL_parse_table, 2, "DSPACKCTL",
- 0xc3, regvalue, cur_col, wrap));
-}
-
-int
ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "DFDAT",
0xc4, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
+static const ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
{ "DSPSEL", 0x1f, 0x1f },
{ "AUTOINCEN", 0x80, 0x80 }
};
@@ -2809,7 +1771,7 @@ ahd_dspselect_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xc4, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
{ "XMITMANVAL", 0x3f, 0x3f },
{ "AUTOXBCDIS", 0x80, 0x80 }
};
@@ -2821,91 +1783,7 @@ ahd_wrtbiasctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xc5, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t RCVRBIOSCTL_parse_table[] = {
- { "RCVRMANVAL", 0x3f, 0x3f },
- { "AUTORBCDIS", 0x80, 0x80 }
-};
-
-int
-ahd_rcvrbiosctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(RCVRBIOSCTL_parse_table, 2, "RCVRBIOSCTL",
- 0xc6, regvalue, cur_col, wrap));
-}
-
-int
-ahd_wrtbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "WRTBIASCALC",
- 0xc7, regvalue, cur_col, wrap));
-}
-
-int
-ahd_rcvrbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "RCVRBIASCALC",
- 0xc8, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfptrs_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFPTRS",
- 0xc8, regvalue, cur_col, wrap));
-}
-
-int
-ahd_skewcalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SKEWCALC",
- 0xc9, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfbkptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFBKPTR",
- 0xc9, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFDBCTL_parse_table[] = {
- { "DFF_RAMBIST_EN", 0x01, 0x01 },
- { "DFF_RAMBIST_DONE", 0x02, 0x02 },
- { "DFF_RAMBIST_FAIL", 0x04, 0x04 },
- { "DFF_DIR_ERR", 0x08, 0x08 },
- { "DFF_CIO_RD_RDY", 0x10, 0x10 },
- { "DFF_CIO_WR_RDY", 0x20, 0x20 }
-};
-
-int
-ahd_dfdbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DFDBCTL_parse_table, 6, "DFDBCTL",
- 0xcb, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfscnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFSCNT",
- 0xcc, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFBCNT",
- 0xce, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ovlyaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "OVLYADDR",
- 0xd4, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
{ "LOADRAM", 0x01, 0x01 },
{ "SEQRESET", 0x02, 0x02 },
{ "STEP", 0x04, 0x04 },
@@ -2923,21 +1801,7 @@ ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xd6, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQCTL1_parse_table[] = {
- { "RAMBIST_EN", 0x01, 0x01 },
- { "RAMBIST_FAIL", 0x02, 0x02 },
- { "RAMBIST_DONE", 0x04, 0x04 },
- { "OVRLAY_DATA_CHK", 0x08, 0x08 }
-};
-
-int
-ahd_seqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SEQCTL1_parse_table, 4, "SEQCTL1",
- 0xd7, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t FLAGS_parse_table[] = {
+static const ahd_reg_parse_entry_t FLAGS_parse_table[] = {
{ "CARRY", 0x01, 0x01 },
{ "ZERO", 0x02, 0x02 }
};
@@ -2949,7 +1813,7 @@ ahd_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xd8, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
{ "IRET", 0x01, 0x01 },
{ "INTMASK1", 0x02, 0x02 },
{ "INTMASK2", 0x04, 0x04 },
@@ -3002,24 +1866,6 @@ ahd_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_brkaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "BRKADDR0",
- 0xe6, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t BRKADDR1_parse_table[] = {
- { "BRKDIS", 0x80, 0x80 }
-};
-
-int
-ahd_brkaddr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(BRKADDR1_parse_table, 1, "BRKADDR1",
- 0xe6, regvalue, cur_col, wrap));
-}
-
-int
ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "ALLONES",
@@ -3055,13 +1901,6 @@ ahd_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FUNCTION1",
- 0xf0, regvalue, cur_col, wrap));
-}
-
-int
ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "STACK",
@@ -3083,13 +1922,6 @@ ahd_curaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_lastaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LASTADDR",
- 0xf6, regvalue, cur_col, wrap));
-}
-
-int
ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "INTVEC2_ADDR",
@@ -3111,23 +1943,16 @@ ahd_accum_save_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
- 0x100, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ahd_pci_config_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE",
+ return (ahd_print_register(NULL, 0, "SRAM_BASE",
0x100, regvalue, cur_col, wrap));
}
int
-ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "SRAM_BASE",
+ return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
0x100, regvalue, cur_col, wrap));
}
@@ -3215,7 +2040,7 @@ ahd_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x137, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
+static const ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
{ "FIFORESET", 0x01, 0x01 },
{ "FIFOFLUSH", 0x02, 0x02 },
{ "DIRECTION", 0x04, 0x04 },
@@ -3235,7 +2060,7 @@ ahd_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x138, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
{ "NO_DISCONNECT", 0x01, 0x01 },
{ "SPHASE_PENDING", 0x02, 0x02 },
{ "DPHASE_PENDING", 0x04, 0x04 },
@@ -3268,7 +2093,7 @@ ahd_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x13b, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
+static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
{ "P_DATAOUT", 0x00, 0xe0 },
{ "P_DATAOUT_DT", 0x20, 0xe0 },
{ "P_DATAIN", 0x40, 0xe0 },
@@ -3326,7 +2151,7 @@ ahd_qoutfifo_next_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x144, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t ARG_1_parse_table[] = {
+static const ahd_reg_parse_entry_t ARG_1_parse_table[] = {
{ "CONT_MSG_LOOP_TARG", 0x02, 0x02 },
{ "CONT_MSG_LOOP_READ", 0x03, 0x03 },
{ "CONT_MSG_LOOP_WRITE",0x04, 0x04 },
@@ -3358,7 +2183,7 @@ ahd_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x14a, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
{ "ALTSTIM", 0x01, 0x01 },
{ "ENAUTOATNP", 0x02, 0x02 },
{ "MANUALP", 0x0c, 0x0c },
@@ -3381,7 +2206,7 @@ ahd_initiator_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x14c, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
{ "PENDING_MK_MESSAGE", 0x01, 0x01 },
{ "TARGET_MSG_PENDING", 0x02, 0x02 },
{ "SELECTOUT_QFROZEN", 0x04, 0x04 }
@@ -3465,20 +2290,20 @@ ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "SCB_BASE",
+ return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
0x180, regvalue, cur_col, wrap));
}
int
-ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
- return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
+ return (ahd_print_register(NULL, 0, "SCB_BASE",
0x180, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
{ "SG_LIST_NULL", 0x01, 0x01 },
{ "SG_OVERRUN_RESID", 0x02, 0x02 },
{ "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -3499,27 +2324,6 @@ ahd_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_scb_target_phases_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_TARGET_PHASES",
- 0x189, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_target_data_dir_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR",
- 0x18a, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_target_itag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_TARGET_ITAG",
- 0x18b, regvalue, cur_col, wrap));
-}
-
-int
ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR",
@@ -3533,7 +2337,7 @@ ahd_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x190, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
{ "SCB_TAG_TYPE", 0x03, 0x03 },
{ "DISCONNECTED", 0x04, 0x04 },
{ "STATUS_RCVD", 0x08, 0x08 },
@@ -3550,7 +2354,7 @@ ahd_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x192, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
{ "OID", 0x0f, 0x0f },
{ "TID", 0xf0, 0xf0 }
};
@@ -3562,7 +2366,7 @@ ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x193, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
{ "LID", 0xff, 0xff }
};
@@ -3573,7 +2377,7 @@ ahd_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x194, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
{ "SCB_XFERLEN_ODD", 0x01, 0x01 }
};
@@ -3584,7 +2388,7 @@ ahd_scb_task_attribute_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x195, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
{ "SCB_CDB_LEN_PTR", 0x80, 0x80 }
};
@@ -3609,7 +2413,7 @@ ahd_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x198, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
{ "SG_HIGH_ADDR_BITS", 0x7f, 0x7f },
{ "SG_LAST_SEG", 0x80, 0x80 }
};
@@ -3621,7 +2425,7 @@ ahd_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1a0, regvalue, cur_col, wrap));
}
-static ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
{ "SG_LIST_NULL", 0x01, 0x01 },
{ "SG_FULL_RESID", 0x02, 0x02 },
{ "SG_STATUS_VALID", 0x04, 0x04 }
@@ -3656,13 +2460,6 @@ ahd_scb_next2_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_scb_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_SPARE",
- 0x1b0, regvalue, cur_col, wrap));
-}
-
-int
ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS",
diff --git a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
index 11bed07e90b..4b51e232392 100644
--- a/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
@@ -5,7 +5,7 @@
* $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
* $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
*/
-static uint8_t seqprog[] = {
+static const uint8_t seqprog[] = {
0xff, 0x02, 0x06, 0x78,
0x00, 0xea, 0x6e, 0x59,
0x01, 0xea, 0x04, 0x30,
@@ -1027,7 +1027,7 @@ ahd_patch0_func(struct ahd_softc *ahd)
return (0);
}
-static struct patch {
+static const struct patch {
ahd_patch_func_t *patch_func;
uint32_t begin :10,
skip_instr :10,
@@ -1166,7 +1166,7 @@ static struct patch {
{ ahd_patch23_func, 815, 11, 1 }
};
-static struct cs {
+static const struct cs {
uint16_t begin;
uint16_t end;
} critical_sections[] = {
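
Const-qualifying these generated tables lets the compiler place the firmware image, patch list, and critical-section ranges in read-only storage, where they can be shared and a stray write faults instead of silently corrupting them. A hedged illustration of the struct cs shape above (the instruction range is invented):

static const struct cs example_section = {
	0x0010,		/* begin */
	0x0020		/* end */
};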
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index c0344e61765..e4e651cca3e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -736,7 +736,7 @@ struct ahc_syncrate {
#define ST_SXFR 0x010 /* Rate Single Transition Only */
#define DT_SXFR 0x040 /* Rate Double Transition Only */
uint8_t period; /* Period to send to SCSI target */
- char *rate;
+ const char *rate;
};
/* Safe and valid period for async negotiations. */
@@ -1114,7 +1114,7 @@ typedef int (ahc_device_setup_t)(struct ahc_softc *);
struct ahc_pci_identity {
uint64_t full_id;
uint64_t id_mask;
- char *name;
+ const char *name;
ahc_device_setup_t *setup;
};
@@ -1133,15 +1133,11 @@ extern const int ahc_num_aic7770_devs;
/*************************** Function Declarations ****************************/
/******************************************************************************/
-u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
-void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
-void ahc_busy_tcl(struct ahc_softc *ahc,
- u_int tcl, u_int busyid);
/***************************** PCI Front End *********************************/
-struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t);
+const struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t);
int ahc_pci_config(struct ahc_softc *,
- struct ahc_pci_identity *);
+ const struct ahc_pci_identity *);
int ahc_pci_test_register_access(struct ahc_softc *);
#ifdef CONFIG_PM
void ahc_pci_resume(struct ahc_softc *ahc);
@@ -1155,9 +1151,6 @@ int aic7770_config(struct ahc_softc *ahc,
/************************** SCB and SCB queue management **********************/
int ahc_probe_scbs(struct ahc_softc *);
-void ahc_run_untagged_queues(struct ahc_softc *ahc);
-void ahc_run_untagged_queue(struct ahc_softc *ahc,
- struct scb_tailq *queue);
void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc,
struct scb *scb);
int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
@@ -1178,22 +1171,8 @@ int ahc_resume(struct ahc_softc *ahc);
#endif
void ahc_set_unit(struct ahc_softc *, int);
void ahc_set_name(struct ahc_softc *, char *);
-void ahc_alloc_scbs(struct ahc_softc *ahc);
void ahc_free(struct ahc_softc *ahc);
int ahc_reset(struct ahc_softc *ahc, int reinit);
-void ahc_shutdown(void *arg);
-
-/*************************** Interrupt Services *******************************/
-void ahc_clear_intstat(struct ahc_softc *ahc);
-void ahc_run_qoutfifo(struct ahc_softc *ahc);
-#ifdef AHC_TARGET_MODE
-void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
-#endif
-void ahc_handle_brkadrint(struct ahc_softc *ahc);
-void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
-void ahc_handle_scsiint(struct ahc_softc *ahc,
- u_int intstat);
-void ahc_clear_critical_section(struct ahc_softc *ahc);
/***************************** Error Recovery *********************************/
typedef enum {
@@ -1214,36 +1193,19 @@ int ahc_search_disc_list(struct ahc_softc *ahc, int target,
char channel, int lun, u_int tag,
int stop_on_first, int remove,
int save_state);
-void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
int ahc_reset_channel(struct ahc_softc *ahc, char channel,
int initiate_reset);
-int ahc_abort_scbs(struct ahc_softc *ahc, int target,
- char channel, int lun, u_int tag,
- role_t role, uint32_t status);
-void ahc_restart(struct ahc_softc *ahc);
-void ahc_calc_residual(struct ahc_softc *ahc,
- struct scb *scb);
+
/*************************** Utility Functions ********************************/
-struct ahc_phase_table_entry*
- ahc_lookup_phase_entry(int phase);
void ahc_compile_devinfo(struct ahc_devinfo *devinfo,
u_int our_id, u_int target,
u_int lun, char channel,
role_t role);
/************************** Transfer Negotiation ******************************/
-struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
+const struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
u_int *ppr_options, u_int maxsync);
u_int ahc_find_period(struct ahc_softc *ahc,
u_int scsirate, u_int maxsync);
-void ahc_validate_offset(struct ahc_softc *ahc,
- struct ahc_initiator_tinfo *tinfo,
- struct ahc_syncrate *syncrate,
- u_int *offset, int wide,
- role_t role);
-void ahc_validate_width(struct ahc_softc *ahc,
- struct ahc_initiator_tinfo *tinfo,
- u_int *bus_width,
- role_t role);
/*
* Negotiation types. These are used to qualify if we should renegotiate
* even if our goal and current transport parameters are identical.
@@ -1263,7 +1225,7 @@ void ahc_set_width(struct ahc_softc *ahc,
u_int width, u_int type, int paused);
void ahc_set_syncrate(struct ahc_softc *ahc,
struct ahc_devinfo *devinfo,
- struct ahc_syncrate *syncrate,
+ const struct ahc_syncrate *syncrate,
u_int period, u_int offset,
u_int ppr_options,
u_int type, int paused);
@@ -1305,11 +1267,10 @@ extern uint32_t ahc_debug;
#define AHC_SHOW_MASKED_ERRORS 0x1000
#define AHC_DEBUG_SEQUENCER 0x2000
#endif
-void ahc_print_scb(struct scb *scb);
void ahc_print_devinfo(struct ahc_softc *ahc,
struct ahc_devinfo *dev);
void ahc_dump_card_state(struct ahc_softc *ahc);
-int ahc_print_register(ahc_reg_parse_entry_t *table,
+int ahc_print_register(const ahc_reg_parse_entry_t *table,
u_int num_entries,
const char *name,
u_int address,
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index e196d83b93c..0d2f763c342 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -238,6 +238,7 @@ register SXFRCTL2 {
register OPTIONMODE {
address 0x008
access_mode RW
+ count 2
field AUTORATEEN 0x80
field AUTOACKEN 0x40
field ATNMGMNTEN 0x20
@@ -254,6 +255,7 @@ register TARGCRCCNT {
address 0x00a
size 2
access_mode RW
+ count 2
}
/*
@@ -344,6 +346,7 @@ register SSTAT2 {
register SSTAT3 {
address 0x00e
access_mode RO
+ count 2
mask SCSICNT 0xf0
mask OFFCNT 0x0f
mask U2OFFCNT 0x7f
@@ -367,6 +370,7 @@ register SCSIID_ULTRA2 {
register SIMODE0 {
address 0x010
access_mode RW
+ count 2
field ENSELDO 0x40
field ENSELDI 0x20
field ENSELINGO 0x10
@@ -429,6 +433,7 @@ register SHADDR {
register SELTIMER {
address 0x018
access_mode RW
+ count 1
field STAGE6 0x20
field STAGE5 0x10
field STAGE4 0x08
@@ -467,6 +472,7 @@ register TARGID {
address 0x01b
size 2
access_mode RW
+ count 14
}
/*
@@ -480,6 +486,7 @@ register TARGID {
register SPIOCAP {
address 0x01b
access_mode RW
+ count 10
field SOFT1 0x80
field SOFT0 0x40
field SOFTCMDEN 0x20
@@ -492,6 +499,7 @@ register SPIOCAP {
register BRDCTL {
address 0x01d
+ count 11
field BRDDAT7 0x80
field BRDDAT6 0x40
field BRDDAT5 0x20
@@ -534,6 +542,7 @@ register BRDCTL {
*/
register SEECTL {
address 0x01e
+ count 11
field EXTARBACK 0x80
field EXTARBREQ 0x40
field SEEMS 0x20
@@ -570,6 +579,7 @@ register SBLKCTL {
register SEQCTL {
address 0x060
access_mode RW
+ count 15
field PERRORDIS 0x80
field PAUSEDIS 0x40
field FAILDIS 0x20
@@ -590,6 +600,7 @@ register SEQCTL {
register SEQRAM {
address 0x061
access_mode RW
+ count 2
}
/*
@@ -604,6 +615,7 @@ register SEQADDR0 {
register SEQADDR1 {
address 0x063
access_mode RW
+ count 8
mask SEQADDR1_MASK 0x01
}
@@ -649,6 +661,7 @@ register NONE {
register FLAGS {
address 0x06b
access_mode RO
+ count 18
field ZERO 0x02
field CARRY 0x01
}
@@ -671,6 +684,7 @@ register FUNCTION1 {
register STACK {
address 0x06f
access_mode RO
+ count 5
}
const STACK_SIZE 4
@@ -692,6 +706,7 @@ register BCTL {
register DSCOMMAND0 {
address 0x084
access_mode RW
+ count 7
field CACHETHEN 0x80 /* Cache Threshold enable */
field DPARCKEN 0x40 /* Data Parity Check Enable */
field MPARCKEN 0x20 /* Memory Parity Check Enable */
@@ -717,6 +732,7 @@ register DSCOMMAND1 {
register BUSTIME {
address 0x085
access_mode RW
+ count 2
mask BOFF 0xf0
mask BON 0x0f
}
@@ -727,6 +743,7 @@ register BUSTIME {
register BUSSPD {
address 0x086
access_mode RW
+ count 2
mask DFTHRSH 0xc0
mask STBOFF 0x38
mask STBON 0x07
@@ -737,6 +754,7 @@ register BUSSPD {
/* aic7850/55/60/70/80/95 only */
register DSPCISTATUS {
address 0x086
+ count 4
mask DFTHRSH_100 0xc0
}
@@ -758,6 +776,7 @@ const SEQ_MAILBOX_SHIFT 0
register HCNTRL {
address 0x087
access_mode RW
+ count 14
field POWRDN 0x40
field SWINT 0x10
field IRQMS 0x08
@@ -869,6 +888,7 @@ register INTSTAT {
register ERROR {
address 0x092
access_mode RO
+ count 26
field CIOPARERR 0x80 /* Ultra2 only */
field PCIERRSTAT 0x40 /* PCI only */
field MPARERR 0x20 /* PCI only */
@@ -885,6 +905,7 @@ register ERROR {
register CLRINT {
address 0x092
access_mode WO
+ count 24
field CLRPARERR 0x10 /* PCI only */
field CLRBRKADRINT 0x08
field CLRSCSIINT 0x04
@@ -943,6 +964,7 @@ register DFDAT {
register SCBCNT {
address 0x09a
access_mode RW
+ count 1
field SCBAUTO 0x80
mask SCBCNT_MASK 0x1f
}
@@ -954,6 +976,7 @@ register SCBCNT {
register QINFIFO {
address 0x09b
access_mode RW
+ count 12
}
/*
@@ -972,11 +995,13 @@ register QINCNT {
register QOUTFIFO {
address 0x09d
access_mode WO
+ count 7
}
register CRCCONTROL1 {
address 0x09d
access_mode RW
+ count 3
field CRCONSEEN 0x80
field CRCVALCHKEN 0x40
field CRCENDCHKEN 0x20
@@ -1013,6 +1038,7 @@ register SCSIPHASE {
register SFUNCT {
address 0x09f
access_mode RW
+ count 4
field ALT_MODE 0x80
}
@@ -1095,6 +1121,7 @@ scb {
}
SCB_SCSIOFFSET {
size 1
+ count 1
}
SCB_NEXT {
size 1
@@ -1118,6 +1145,7 @@ const SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */
register SEECTL_2840 {
address 0x0c0
access_mode RW
+ count 2
field CS_2840 0x04
field CK_2840 0x02
field DO_2840 0x01
@@ -1126,6 +1154,7 @@ register SEECTL_2840 {
register STATUS_2840 {
address 0x0c1
access_mode RW
+ count 4
field EEPROM_TF 0x80
mask BIOS_SEL 0x60
mask ADSEL 0x1e
@@ -1161,6 +1190,7 @@ register CCSGCTL {
register CCSCBCNT {
address 0xEF
+ count 1
}
register CCSCBCTL {
@@ -1187,6 +1217,7 @@ register CCSCBRAM {
register SCBBADDR {
address 0x0F0
access_mode RW
+ count 3
}
register CCSCBPTR {
@@ -1195,6 +1226,7 @@ register CCSCBPTR {
register HNSCB_QOFF {
address 0x0F4
+ count 4
}
register SNSCB_QOFF {
@@ -1234,6 +1266,7 @@ register DFF_THRSH {
mask WR_DFTHRSH_85 0x50
mask WR_DFTHRSH_90 0x60
mask WR_DFTHRSH_MAX 0x70
+ count 4
}
register SG_CACHE_PRE {
@@ -1287,6 +1320,7 @@ scratch_ram {
ULTRA_ENB {
alias CMDSIZE_TABLE
size 2
+ count 2
}
/*
* Bit vector of targets that have disconnection disabled as set by
@@ -1296,6 +1330,7 @@ scratch_ram {
*/
DISC_DSB {
size 2
+ count 6
}
CMDSIZE_TABLE_TAIL {
size 4
@@ -1323,6 +1358,7 @@ scratch_ram {
/* Parameters for DMA Logic */
DMAPARAMS {
size 1
+ count 12
field PRELOADEN 0x80
field WIDEODD 0x40
field SCSIEN 0x20
@@ -1436,11 +1472,12 @@ scratch_ram {
KERNEL_TQINPOS {
size 1
}
- TQINPOS {
+ TQINPOS {
size 1
}
ARG_1 {
size 1
+ count 1
mask SEND_MSG 0x80
mask SEND_SENSE 0x40
mask SEND_REJ 0x20
@@ -1495,6 +1532,7 @@ scratch_ram {
size 1
field HA_274_EXTENDED_TRANS 0x01
alias INITIATOR_TAG
+ count 1
}
SEQ_FLAGS2 {
@@ -1518,6 +1556,7 @@ scratch_ram {
*/
SCSICONF {
size 1
+ count 12
field TERM_ENB 0x80
field RESET_SCSI 0x40
field ENSPCHK 0x20
@@ -1527,16 +1566,19 @@ scratch_ram {
INTDEF {
address 0x05c
size 1
+ count 1
field EDGE_TRIG 0x80
mask VECTOR 0x0f
}
HOSTCONF {
address 0x05d
size 1
+ count 1
}
HA_274_BIOSCTRL {
address 0x05f
size 1
+ count 1
mask BIOSMODE 0x30
mask BIOSDISABLED 0x30
field CHANNEL_B_PRIMARY 0x08
@@ -1552,6 +1594,7 @@ scratch_ram {
*/
TARG_OFFSET {
size 16
+ count 1
}
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 3cb07e114e8..dd11999b77b 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -84,16 +84,16 @@ struct seeprom_cmd {
};
/* Short opcodes for the c46 */
-static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
-static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
/* Long opcodes for the C56/C66 */
-static struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
-static struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
/* Common opcodes */
-static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
-static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
+static const struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
+static const struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
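
Each command is a start bit followed by the opcode bits, shifted out one at a time by send_seeprom_cmd(). A minimal sketch of the write-enable/write-disable bracket that ahc_write_seeprom() performs below (the example_ wrapper is illustrative):

static void
example_write_enable(struct seeprom_descriptor *sd)
{
	send_seeprom_cmd(sd, &seeprom_ewen);	/* unlock a C46 part */
	/* write cycles would go here */
	send_seeprom_cmd(sd, &seeprom_ewds);	/* lock it again */
}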
/*
* Wait for the SEERDY to go high; about 800 ns.
@@ -108,7 +108,7 @@ static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
* Send a START condition and the given command
*/
static void
-send_seeprom_cmd(struct seeprom_descriptor *sd, struct seeprom_cmd *cmd)
+send_seeprom_cmd(struct seeprom_descriptor *sd, const struct seeprom_cmd *cmd)
{
uint8_t temp;
int i = 0;
@@ -227,7 +227,7 @@ int
ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
u_int start_addr, u_int count)
{
- struct seeprom_cmd *ewen, *ewds;
+ const struct seeprom_cmd *ewen, *ewds;
uint16_t v;
uint8_t temp;
int i, k;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 64e62ce59c1..0ae2b4605d0 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -51,8 +51,7 @@
#endif
/***************************** Lookup Tables **********************************/
-char *ahc_chip_names[] =
-{
+static const char *const ahc_chip_names[] = {
"NONE",
"aic7770",
"aic7850",
@@ -75,10 +74,10 @@ static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
*/
struct ahc_hard_error_entry {
uint8_t errno;
- char *errmesg;
+ const char *errmesg;
};
-static struct ahc_hard_error_entry ahc_hard_errors[] = {
+static const struct ahc_hard_error_entry ahc_hard_errors[] = {
{ ILLHADDR, "Illegal Host Access" },
{ ILLSADDR, "Illegal Sequencer Address referenced" },
{ ILLOPCODE, "Illegal Opcode in sequencer program" },
@@ -90,7 +89,7 @@ static struct ahc_hard_error_entry ahc_hard_errors[] = {
};
static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors);
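
The table is consulted when a BRKADRINT fires and the raw ERROR register value needs a human-readable name. A hedged sketch of one plausible bit-test scan (the wrapper and the printk reporting are illustrative, not the driver's exact handler):

static void
example_report_error(u_int error)
{
	u_int i;

	for (i = 0; i < num_errors; i++) {
		if ((error & ahc_hard_errors[i].errno) != 0)
			printk("%s\n", ahc_hard_errors[i].errmesg);
	}
}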
-static struct ahc_phase_table_entry ahc_phase_table[] =
+static const struct ahc_phase_table_entry ahc_phase_table[] =
{
{ P_DATAOUT, MSG_NOOP, "in Data-out phase" },
{ P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
@@ -115,7 +114,7 @@ static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1;
* Provides a mapping of transfer periods in ns to the proper value to
* stick in the scsixfer reg.
*/
-static struct ahc_syncrate ahc_syncrates[] =
+static const struct ahc_syncrate ahc_syncrates[] =
{
/* ultra2 fast/ultra period rate */
{ 0x42, 0x000, 9, "80.0" },
@@ -148,7 +147,7 @@ static struct ahc_tmode_tstate*
static void ahc_free_tstate(struct ahc_softc *ahc,
u_int scsi_id, char channel, int force);
#endif
-static struct ahc_syncrate*
+static const struct ahc_syncrate*
ahc_devlimited_syncrate(struct ahc_softc *ahc,
struct ahc_initiator_tinfo *,
u_int *period,
@@ -204,9 +203,9 @@ static void ahc_setup_target_msgin(struct ahc_softc *ahc,
#endif
static bus_dmamap_callback_t ahc_dmamap_cb;
-static void ahc_build_free_scb_list(struct ahc_softc *ahc);
-static int ahc_init_scbdata(struct ahc_softc *ahc);
-static void ahc_fini_scbdata(struct ahc_softc *ahc);
+static void ahc_build_free_scb_list(struct ahc_softc *ahc);
+static int ahc_init_scbdata(struct ahc_softc *ahc);
+static void ahc_fini_scbdata(struct ahc_softc *ahc);
static void ahc_qinfifo_requeue(struct ahc_softc *ahc,
struct scb *prev_scb,
struct scb *scb);
@@ -222,7 +221,7 @@ static void ahc_dumpseq(struct ahc_softc *ahc);
#endif
static int ahc_loadseq(struct ahc_softc *ahc);
static int ahc_check_patch(struct ahc_softc *ahc,
- struct patch **start_patch,
+ const struct patch **start_patch,
u_int start_instr, u_int *skip_addr);
static void ahc_download_instr(struct ahc_softc *ahc,
u_int instrptr, uint8_t *dconsts);
@@ -237,11 +236,582 @@ static void ahc_update_scsiid(struct ahc_softc *ahc,
static int ahc_handle_target_cmd(struct ahc_softc *ahc,
struct target_cmd *cmd);
#endif
+
+static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
+static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
+static void ahc_busy_tcl(struct ahc_softc *ahc,
+ u_int tcl, u_int busyid);
+
+/************************** SCB and SCB queue management **********************/
+static void ahc_run_untagged_queues(struct ahc_softc *ahc);
+static void ahc_run_untagged_queue(struct ahc_softc *ahc,
+ struct scb_tailq *queue);
+
+/****************************** Initialization ********************************/
+static void ahc_alloc_scbs(struct ahc_softc *ahc);
+static void ahc_shutdown(void *arg);
+
+/*************************** Interrupt Services *******************************/
+static void ahc_clear_intstat(struct ahc_softc *ahc);
+static void ahc_run_qoutfifo(struct ahc_softc *ahc);
+#ifdef AHC_TARGET_MODE
+static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
+#endif
+static void ahc_handle_brkadrint(struct ahc_softc *ahc);
+static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
+static void ahc_handle_scsiint(struct ahc_softc *ahc,
+ u_int intstat);
+static void ahc_clear_critical_section(struct ahc_softc *ahc);
+
+/***************************** Error Recovery *********************************/
+static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
+static int ahc_abort_scbs(struct ahc_softc *ahc, int target,
+ char channel, int lun, u_int tag,
+ role_t role, uint32_t status);
+static void ahc_calc_residual(struct ahc_softc *ahc,
+ struct scb *scb);
+
+/*********************** Untagged Transaction Routines ************************/
+static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
+static inline void ahc_release_untagged_queues(struct ahc_softc *ahc);
+
+/*
+ * Block our completion routine from starting the next untagged
+ * transaction for this target or target lun.
+ */
+static inline void
+ahc_freeze_untagged_queues(struct ahc_softc *ahc)
+{
+ if ((ahc->flags & AHC_SCB_BTT) == 0)
+ ahc->untagged_queue_lock++;
+}
+
+/*
+ * Allow the next untagged transaction for this target or target lun
+ * to be executed. We use a counting semaphore to allow the lock
+ * to be acquired recursively. Once the count drops to zero, the
+ * transaction queues will be run.
+ */
+static inline void
+ahc_release_untagged_queues(struct ahc_softc *ahc)
+{
+ if ((ahc->flags & AHC_SCB_BTT) == 0) {
+ ahc->untagged_queue_lock--;
+ if (ahc->untagged_queue_lock == 0)
+ ahc_run_untagged_queues(ahc);
+ }
+}
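
Because the lock is a counter, freeze/release pairs may nest; the queues only run again when the outermost release drops the count to zero. A minimal sketch of the expected bracketing (the example_ wrapper is illustrative):

static void
example_complete_untagged(struct ahc_softc *ahc)
{
	ahc_freeze_untagged_queues(ahc);
	/* complete or requeue the active untagged SCB here */
	ahc_release_untagged_queues(ahc);	/* queues run at count zero */
}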
+
/************************* Sequencer Execution Control ************************/
/*
- * Restart the sequencer program from address zero
+ * Work around any chip bugs related to halting sequencer execution.
+ * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
+ * reading a register that will set this signal and deassert it.
+ * Without this workaround, if the chip is paused, by an interrupt or
+ * manual pause while accessing scb ram, accesses to certain registers
+ * will hang the system (infinite pci retries).
+ */
+static void
+ahc_pause_bug_fix(struct ahc_softc *ahc)
+{
+ if ((ahc->features & AHC_ULTRA2) != 0)
+ (void)ahc_inb(ahc, CCSCBCTL);
+}
+
+/*
+ * Determine whether the sequencer has halted code execution.
+ * Returns non-zero status if the sequencer is stopped.
+ */
+int
+ahc_is_paused(struct ahc_softc *ahc)
+{
+ return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
+}
+
+/*
+ * Request that the sequencer stop and wait, indefinitely, for it
+ * to stop. The sequencer will only acknowledge that it is paused
+ * once it has reached an instruction boundary and PAUSEDIS is
+ * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
+ * for critical sections.
+ */
+void
+ahc_pause(struct ahc_softc *ahc)
+{
+ ahc_outb(ahc, HCNTRL, ahc->pause);
+
+ /*
+ * Since the sequencer can disable pausing in a critical section, we
+ * must loop until it actually stops.
+ */
+ while (ahc_is_paused(ahc) == 0)
+ ;
+
+ ahc_pause_bug_fix(ahc);
+}
+
+/*
+ * Allow the sequencer to continue program execution.
+ * We check here to ensure that no additional interrupt
+ * sources that would cause the sequencer to halt have been
+ * asserted. If, for example, a SCSI bus reset is detected
+ * while we are fielding a different, pausing, interrupt type,
+ * we don't want to release the sequencer before going back
+ * into our interrupt handler and dealing with this new
+ * condition.
+ */
+void
+ahc_unpause(struct ahc_softc *ahc)
+{
+ if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
+ ahc_outb(ahc, HCNTRL, ahc->unpause);
+}
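
Sequencer-owned registers are only stable while execution is halted, so kernel reads of them are bracketed by this pair. A hedged sketch using SEQADDR0 (defined in aic7xxx.reg further down) as the sampled register:

static u_int
example_read_seqaddr(struct ahc_softc *ahc)
{
	u_int seqaddr;

	ahc_pause(ahc);
	seqaddr = ahc_inb(ahc, SEQADDR0);	/* stable only while paused */
	ahc_unpause(ahc);
	return (seqaddr);
}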
+
+/************************** Memory mapping routines ***************************/
+static struct ahc_dma_seg *
+ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
+{
+ int sg_index;
+
+ sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_index++;
+
+ return (&scb->sg_list[sg_index]);
+}
+
+static uint32_t
+ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
+{
+ int sg_index;
+
+ /* sg_list_phys points to entry 1, not 0 */
+ sg_index = sg - &scb->sg_list[1];
+
+ return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
+}
+
+static uint32_t
+ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
+{
+ return (ahc->scb_data->hscb_busaddr
+ + (sizeof(struct hardware_scb) * index));
+}
+
+static void
+ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
+{
+ ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
+ ahc->scb_data->hscb_dmamap,
+ /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
+ /*len*/sizeof(*scb->hscb), op);
+}
+
+void
+ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
+{
+ if (scb->sg_count == 0)
+ return;
+
+ ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
+ /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
+ * sizeof(struct ahc_dma_seg),
+ /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
+}
+
+#ifdef AHC_TARGET_MODE
+static uint32_t
+ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
+{
+ return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
+}
+#endif
+
+/********************** Miscellaneous Support Functions ***********************/
+/*
+ * Determine whether the sequencer reported a residual
+ * for this SCB/transaction.
+ */
+static void
+ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
+{
+ uint32_t sgptr;
+
+ sgptr = ahc_le32toh(scb->hscb->sgptr);
+ if ((sgptr & SG_RESID_VALID) != 0)
+ ahc_calc_residual(ahc, scb);
+}
+
+/*
+ * Return pointers to the transfer negotiation information
+ * for the specified our_id/remote_id pair.
+ */
+struct ahc_initiator_tinfo *
+ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
+ u_int remote_id, struct ahc_tmode_tstate **tstate)
+{
+ /*
+ * Transfer data structures are stored from the perspective
+ * of the target role. Since the parameters for a connection
+ * in the initiator role to a given target are the same as
+ * when the roles are reversed, we pretend we are the target.
+ */
+ if (channel == 'B')
+ our_id += 8;
+ *tstate = ahc->enabled_targets[our_id];
+ return (&(*tstate)->transinfo[remote_id]);
+}
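
A typical initiator-side lookup, assuming valid SCSI IDs; note the helper applies the channel 'B' offset itself (the example_ wrapper is illustrative):

static struct ahc_initiator_tinfo *
example_lookup_tinfo(struct ahc_softc *ahc, u_int our_id, u_int target)
{
	struct ahc_tmode_tstate *tstate;

	return (ahc_fetch_transinfo(ahc, 'A', our_id, target, &tstate));
}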
+
+uint16_t
+ahc_inw(struct ahc_softc *ahc, u_int port)
+{
+ uint16_t r = ahc_inb(ahc, port+1) << 8;
+ return r | ahc_inb(ahc, port);
+}
+
+void
+ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
+{
+ ahc_outb(ahc, port, value & 0xFF);
+ ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
+}
+
+uint32_t
+ahc_inl(struct ahc_softc *ahc, u_int port)
+{
+ return ((ahc_inb(ahc, port))
+ | (ahc_inb(ahc, port+1) << 8)
+ | (ahc_inb(ahc, port+2) << 16)
+ | (ahc_inb(ahc, port+3) << 24));
+}
+
+void
+ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
+{
+ ahc_outb(ahc, port, (value) & 0xFF);
+ ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
+ ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
+ ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
+}
+
+uint64_t
+ahc_inq(struct ahc_softc *ahc, u_int port)
+{
+ return ((ahc_inb(ahc, port))
+ | (ahc_inb(ahc, port+1) << 8)
+ | (ahc_inb(ahc, port+2) << 16)
+ | (ahc_inb(ahc, port+3) << 24)
+ | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
+ | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
+ | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
+ | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
+}
+
+void
+ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
+{
+ ahc_outb(ahc, port, value & 0xFF);
+ ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
+ ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
+ ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
+ ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
+ ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
+ ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
+ ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
+}
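
The chip only decodes byte-wide accesses, so the wider helpers assemble values LSB-first across consecutive ports. A round-trip sketch, assuming port names a readable 8-byte register window (illustrative only):

static uint64_t
example_roundtrip(struct ahc_softc *ahc, u_int port)
{
	ahc_outq(ahc, port, 0x0123456789abcdefULL);
	return (ahc_inq(ahc, port));	/* byte lanes reassemble LSB-first */
}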
+
+/*
+ * Get a free scb. If there are none, see if we can allocate a new SCB.
+ */
+struct scb *
+ahc_get_scb(struct ahc_softc *ahc)
+{
+ struct scb *scb;
+
+ if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
+ ahc_alloc_scbs(ahc);
+ scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
+ if (scb == NULL)
+ return (NULL);
+ }
+ SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
+ return (scb);
+}
+
+/*
+ * Return an SCB resource to the free list.
+ */
+void
+ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+ struct hardware_scb *hscb;
+
+ hscb = scb->hscb;
+ /* Clean up for the next user */
+ ahc->scb_data->scbindex[hscb->tag] = NULL;
+ scb->flags = SCB_FREE;
+ hscb->control = 0;
+
+ SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
+
+ /* Notify the OSM that a resource is now available. */
+ ahc_platform_scb_free(ahc, scb);
+}
+
+struct scb *
+ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
+{
+ struct scb* scb;
+
+ scb = ahc->scb_data->scbindex[tag];
+ if (scb != NULL)
+ ahc_sync_scb(ahc, scb,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ return (scb);
+}
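
The completion fifos carry only SCB tags, so this is the bridge back to the driver's per-command state. A condensed sketch of the reap pattern (fifo advance, SCB_LIST_NULL checks, and status reporting elided; the wrapper is illustrative):

static void
example_reap_one(struct ahc_softc *ahc)
{
	struct scb *scb;
	u_int tag;

	tag = ahc->qoutfifo[ahc->qoutfifonext];
	scb = ahc_lookup_scb(ahc, tag);
	if (scb != NULL)
		ahc_free_scb(ahc, scb);	/* after reporting status upstream */
}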
+
+static void
+ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
+{
+ struct hardware_scb *q_hscb;
+ u_int saved_tag;
+
+ /*
+ * Our queuing method is a bit tricky. The card
+ * knows in advance which HSCB to download, and we
+ * can't disappoint it. To achieve this, the next
+ * SCB to download is saved off in ahc->next_queued_scb.
+ * When we are called to queue "an arbitrary scb",
+ * we copy the contents of the incoming HSCB to the one
+ * the sequencer knows about, swap HSCB pointers and
+ * finally assign the SCB to the tag indexed location
+ * in the scb_array. This makes sure that we can still
+ * locate the correct SCB by SCB_TAG.
+ */
+ q_hscb = ahc->next_queued_scb->hscb;
+ saved_tag = q_hscb->tag;
+ memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
+ if ((scb->flags & SCB_CDB32_PTR) != 0) {
+ q_hscb->shared_data.cdb_ptr =
+ ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
+ + offsetof(struct hardware_scb, cdb32));
+ }
+ q_hscb->tag = saved_tag;
+ q_hscb->next = scb->hscb->tag;
+
+ /* Now swap HSCB pointers. */
+ ahc->next_queued_scb->hscb = scb->hscb;
+ scb->hscb = q_hscb;
+
+ /* Now define the mapping from tag to SCB in the scbindex */
+ ahc->scb_data->scbindex[scb->hscb->tag] = scb;
+}
+
+/*
+ * Tell the sequencer about a new transaction to execute.
*/
void
+ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+ ahc_swap_with_next_hscb(ahc, scb);
+
+ if (scb->hscb->tag == SCB_LIST_NULL
+ || scb->hscb->next == SCB_LIST_NULL)
+ panic("Attempt to queue invalid SCB tag %x:%x\n",
+ scb->hscb->tag, scb->hscb->next);
+
+ /*
+ * Setup data "oddness".
+ */
+ scb->hscb->lun &= LID;
+ if (ahc_get_transfer_length(scb) & 0x1)
+ scb->hscb->lun |= SCB_XFERLEN_ODD;
+
+ /*
+ * Keep a history of SCBs we've downloaded in the qinfifo.
+ */
+ ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
+
+ /*
+ * Make sure our data is consistent from the
+ * perspective of the adapter.
+ */
+ ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+ /* Tell the adapter about the newly queued SCB */
+ if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+ ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+ } else {
+ if ((ahc->features & AHC_AUTOPAUSE) == 0)
+ ahc_pause(ahc);
+ ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+ if ((ahc->features & AHC_AUTOPAUSE) == 0)
+ ahc_unpause(ahc);
+ }
+}
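
A condensed submission path, assuming the remaining hardware SCB fields (IDs, CDB, data pointers) were already filled in; only allocation, the untagged control setup, and the hand-off are shown (the wrapper is illustrative):

static void
example_submit(struct ahc_softc *ahc)
{
	struct scb *scb;

	scb = ahc_get_scb(ahc);
	if (scb == NULL)
		return;			/* out of SCBs; retry later */
	scb->hscb->control = 0;		/* untagged transaction */
	ahc_queue_scb(ahc, scb);	/* swap hscbs, kick the sequencer */
}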
+
+struct scsi_sense_data *
+ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
+{
+ int offset;
+
+ offset = scb - ahc->scb_data->scbarray;
+ return (&ahc->scb_data->sense[offset]);
+}
+
+static uint32_t
+ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
+{
+ int offset;
+
+ offset = scb - ahc->scb_data->scbarray;
+ return (ahc->scb_data->sense_busaddr
+ + (offset * sizeof(struct scsi_sense_data)));
+}
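
Both sense-buffer helpers recover the SCB's index by pointer subtraction; a self-contained sketch of that idiom, with hypothetical types and sizes:

#include <stddef.h>
#include <stdio.h>

struct fake_scb { char pad[96]; };	/* stand-in for struct scb */

int main(void)
{
	struct fake_scb scbarray[16];
	struct fake_scb *scb = &scbarray[5];
	ptrdiff_t offset = scb - scbarray;	/* element count, not bytes */

	printf("offset = %td\n", offset);	/* prints 5 */
	return (0);
}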
+
+/************************** Interrupt Processing ******************************/
+static void
+ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
+{
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+ /*offset*/0, /*len*/256, op);
+}
+
+static void
+ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
+{
+#ifdef AHC_TARGET_MODE
+ if ((ahc->flags & AHC_TARGETROLE) != 0) {
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+ ahc->shared_data_dmamap,
+ ahc_targetcmd_offset(ahc, 0),
+ sizeof(struct target_cmd) * AHC_TMODE_CMDS,
+ op);
+ }
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHC_RUN_QOUTFIFO 0x1
+#define AHC_RUN_TQINFIFO 0x2
+static u_int
+ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
+{
+ u_int retval;
+
+ retval = 0;
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+ /*offset*/ahc->qoutfifonext, /*len*/1,
+ BUS_DMASYNC_POSTREAD);
+ if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
+ retval |= AHC_RUN_QOUTFIFO;
+#ifdef AHC_TARGET_MODE
+ if ((ahc->flags & AHC_TARGETROLE) != 0
+ && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
+ ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+ ahc->shared_data_dmamap,
+				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
+ /*len*/sizeof(struct target_cmd),
+ BUS_DMASYNC_POSTREAD);
+ if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
+ retval |= AHC_RUN_TQINFIFO;
+ }
+#endif
+ return (retval);
+}
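
Why the in-core peek is sufficient (SCB_LIST_NULL is 0xFF in this driver):

/*
 * The sequencer posts a valid tag into qoutfifo[qoutfifonext] on
 * completion, and consumed slots are put back to SCB_LIST_NULL, so a
 * non-NULL slot reliably means "work pending"; only when this peek is
 * inconclusive does ahc_intr() pay for the PCI read of INTSTAT.
 */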
+
+/*
+ * Catch an interrupt from the adapter
+ */
+int
+ahc_intr(struct ahc_softc *ahc)
+{
+ u_int intstat;
+
+ if ((ahc->pause & INTEN) == 0) {
+ /*
+ * Our interrupt is not enabled on the chip
+ * and may be disabled for re-entrancy reasons,
+ * so just return. This is likely just a shared
+ * interrupt.
+ */
+ return (0);
+ }
+ /*
+ * Instead of directly reading the interrupt status register,
+ * infer the cause of the interrupt by checking our in-core
+ * completion queues. This avoids a costly PCI bus read in
+ * most cases.
+ */
+ if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
+ && (ahc_check_cmdcmpltqueues(ahc) != 0))
+ intstat = CMDCMPLT;
+ else {
+ intstat = ahc_inb(ahc, INTSTAT);
+ }
+
+ if ((intstat & INT_PEND) == 0) {
+#if AHC_PCI_CONFIG > 0
+ if (ahc->unsolicited_ints > 500) {
+ ahc->unsolicited_ints = 0;
+ if ((ahc->chip & AHC_PCI) != 0
+ && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
+ ahc->bus_intr(ahc);
+ }
+#endif
+ ahc->unsolicited_ints++;
+ return (0);
+ }
+ ahc->unsolicited_ints = 0;
+
+ if (intstat & CMDCMPLT) {
+ ahc_outb(ahc, CLRINT, CLRCMDINT);
+
+ /*
+ * Ensure that the chip sees that we've cleared
+ * this interrupt before we walk the output fifo.
+ * Otherwise, we may, due to posted bus writes,
+ * clear the interrupt after we finish the scan,
+ * and after the sequencer has added new entries
+ * and asserted the interrupt again.
+ */
+ ahc_flush_device_writes(ahc);
+ ahc_run_qoutfifo(ahc);
+#ifdef AHC_TARGET_MODE
+ if ((ahc->flags & AHC_TARGETROLE) != 0)
+ ahc_run_tqinfifo(ahc, /*paused*/FALSE);
+#endif
+ }
+
+ /*
+ * Handle statuses that may invalidate our cached
+ * copy of INTSTAT separately.
+ */
+ if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
+ /* Hot eject. Do nothing */
+ } else if (intstat & BRKADRINT) {
+ ahc_handle_brkadrint(ahc);
+ } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
+
+ ahc_pause_bug_fix(ahc);
+
+ if ((intstat & SEQINT) != 0)
+ ahc_handle_seqint(ahc, intstat);
+
+ if ((intstat & SCSIINT) != 0)
+ ahc_handle_scsiint(ahc, intstat);
+ }
+ return (1);
+}
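
The return value feeds the OS wrapper's shared-IRQ decision; a minimal sketch of such a wrapper (the real one is ahc_linux_isr in aic7xxx_osm.c, and the ahc_lock/ahc_unlock helpers are assumed to be the OSM's):

#include <linux/interrupt.h>

static irqreturn_t
example_isr(int irq, void *dev_id)
{
	struct ahc_softc *ahc = dev_id;
	unsigned long flags;
	int ours;

	ahc_lock(ahc, &flags);
	ours = ahc_intr(ahc);		/* 0 means "not our interrupt" */
	ahc_unlock(ahc, &flags);
	return IRQ_RETVAL(ours);	/* IRQ_HANDLED iff ours != 0 */
}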
+
+/************************* Sequencer Execution Control ************************/
+/*
+ * Restart the sequencer program from address zero
+ */
+static void
ahc_restart(struct ahc_softc *ahc)
{
@@ -302,7 +872,7 @@ ahc_restart(struct ahc_softc *ahc)
}
/************************* Input/Output Queues ********************************/
-void
+static void
ahc_run_qoutfifo(struct ahc_softc *ahc)
{
struct scb *scb;
@@ -349,7 +919,7 @@ ahc_run_qoutfifo(struct ahc_softc *ahc)
}
}
-void
+static void
ahc_run_untagged_queues(struct ahc_softc *ahc)
{
int i;
@@ -358,7 +928,7 @@ ahc_run_untagged_queues(struct ahc_softc *ahc)
ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
}
-void
+static void
ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
{
struct scb *scb;
@@ -374,7 +944,7 @@ ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
}
/************************* Interrupt Handling *********************************/
-void
+static void
ahc_handle_brkadrint(struct ahc_softc *ahc)
{
/*
@@ -403,7 +973,7 @@ ahc_handle_brkadrint(struct ahc_softc *ahc)
ahc_shutdown(ahc);
}
-void
+static void
ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
{
struct scb *scb;
@@ -954,7 +1524,7 @@ unpause:
ahc_unpause(ahc);
}
-void
+static void
ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
{
u_int scb_index;
@@ -1407,7 +1977,7 @@ ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
}
#define AHC_MAX_STEPS 2000
-void
+static void
ahc_clear_critical_section(struct ahc_softc *ahc)
{
int stepping;
@@ -1500,7 +2070,7 @@ ahc_clear_critical_section(struct ahc_softc *ahc)
/*
* Clear any pending interrupt status.
*/
-void
+static void
ahc_clear_intstat(struct ahc_softc *ahc)
{
/* Clear any interrupt conditions this may have caused */
@@ -1519,7 +2089,8 @@ ahc_clear_intstat(struct ahc_softc *ahc)
uint32_t ahc_debug = AHC_DEBUG_OPTS;
#endif
-void
+#if 0 /* unused */
+static void
ahc_print_scb(struct scb *scb)
{
int i;
@@ -1551,6 +2122,7 @@ ahc_print_scb(struct scb *scb)
}
}
}
+#endif
/************************* Transfer Negotiation *******************************/
/*
@@ -1634,7 +2206,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
* by the capabilities of the bus connectivity of and sync settings for
* the target.
*/
-struct ahc_syncrate *
+const struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc,
struct ahc_initiator_tinfo *tinfo,
u_int *period, u_int *ppr_options, role_t role)
@@ -1689,11 +2261,11 @@ ahc_devlimited_syncrate(struct ahc_softc *ahc,
* Return the period and offset that should be sent to the target
* if this was the beginning of an SDTR.
*/
-struct ahc_syncrate *
+const struct ahc_syncrate *
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
u_int *ppr_options, u_int maxsync)
{
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
if ((ahc->features & AHC_DT) == 0)
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
@@ -1768,7 +2340,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
u_int
ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
{
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
if ((ahc->features & AHC_ULTRA2) != 0)
scsirate &= SXFR_ULTRA2;
@@ -1806,10 +2378,10 @@ ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
* Truncate the given synchronous offset to a value the
* current adapter type and syncrate are capable of.
*/
-void
+static void
ahc_validate_offset(struct ahc_softc *ahc,
struct ahc_initiator_tinfo *tinfo,
- struct ahc_syncrate *syncrate,
+ const struct ahc_syncrate *syncrate,
u_int *offset, int wide, role_t role)
{
u_int maxoffset;
@@ -1838,7 +2410,7 @@ ahc_validate_offset(struct ahc_softc *ahc,
* Truncate the given transfer width parameter to a value the
* current adapter type is capable of.
*/
-void
+static void
ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
u_int *bus_width, role_t role)
{
@@ -1913,7 +2485,7 @@ ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
*/
void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
- struct ahc_syncrate *syncrate, u_int period,
+ const struct ahc_syncrate *syncrate, u_int period,
u_int offset, u_int ppr_options, u_int type, int paused)
{
struct ahc_initiator_tinfo *tinfo;
@@ -2220,11 +2792,11 @@ ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
role);
}
-struct ahc_phase_table_entry*
+static const struct ahc_phase_table_entry*
ahc_lookup_phase_entry(int phase)
{
- struct ahc_phase_table_entry *entry;
- struct ahc_phase_table_entry *last_entry;
+ const struct ahc_phase_table_entry *entry;
+ const struct ahc_phase_table_entry *last_entry;
/*
* num_phases doesn't include the default entry which
@@ -2390,7 +2962,7 @@ ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
*/
struct ahc_initiator_tinfo *tinfo;
struct ahc_tmode_tstate *tstate;
- struct ahc_syncrate *rate;
+ const struct ahc_syncrate *rate;
int dowide;
int dosync;
int doppr;
@@ -2655,7 +3227,7 @@ proto_violation_reset:
*/
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
-{
+{
struct ahc_devinfo devinfo;
u_int bus_phase;
int end_session;
@@ -3056,7 +3628,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
switch (ahc->msgin_buf[2]) {
case MSG_EXT_SDTR:
{
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
u_int period;
u_int ppr_options;
u_int offset;
@@ -3231,7 +3803,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
}
case MSG_EXT_PPR:
{
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
u_int period;
u_int offset;
u_int bus_width;
@@ -3984,7 +4556,7 @@ ahc_free(struct ahc_softc *ahc)
return;
}
-void
+static void
ahc_shutdown(void *arg)
{
struct ahc_softc *ahc;
@@ -4388,7 +4960,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
free(scb_data->scbarray, M_DEVBUF);
}
-void
+static void
ahc_alloc_scbs(struct ahc_softc *ahc)
{
struct scb_data *scb_data;
@@ -5121,7 +5693,7 @@ ahc_resume(struct ahc_softc *ahc)
* Return the untagged transaction id for a given target/channel lun.
* Optionally, clear the entry.
*/
-u_int
+static u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
{
u_int scbid;
@@ -5142,7 +5714,7 @@ ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
return (scbid);
}
-void
+static void
ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
{
u_int target_offset;
@@ -5160,7 +5732,7 @@ ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
}
}
-void
+static void
ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
{
u_int target_offset;
@@ -5215,7 +5787,7 @@ ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
return match;
}
-void
+static void
ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
int target;
@@ -5707,7 +6279,7 @@ ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
*/
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
-{
+{
u_int curscb, next;
/*
@@ -5756,7 +6328,7 @@ ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
* been modified from CAM_REQ_INPROG. This routine assumes that the sequencer
* is paused before it is called.
*/
-int
+static int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
int lun, u_int tag, role_t role, uint32_t status)
{
@@ -6078,7 +6650,7 @@ ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
/*
* Calculate the residual for a just completed SCB.
*/
-void
+static void
ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
{
struct hardware_scb *hscb;
@@ -6279,7 +6851,7 @@ ahc_loadseq(struct ahc_softc *ahc)
struct cs cs_table[num_critical_sections];
u_int begin_set[num_critical_sections];
u_int end_set[num_critical_sections];
- struct patch *cur_patch;
+ const struct patch *cur_patch;
u_int cs_count;
u_int cur_cs;
u_int i;
@@ -6384,11 +6956,11 @@ ahc_loadseq(struct ahc_softc *ahc)
}
static int
-ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
+ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch,
u_int start_instr, u_int *skip_addr)
{
- struct patch *cur_patch;
- struct patch *last_patch;
+ const struct patch *cur_patch;
+ const struct patch *last_patch;
u_int num_patches;
num_patches = ARRAY_SIZE(patches);
@@ -6447,7 +7019,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
case AIC_OP_JE:
case AIC_OP_JZ:
{
- struct patch *cur_patch;
+ const struct patch *cur_patch;
int address_offset;
u_int address;
u_int skip_addr;
@@ -6545,7 +7117,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
}
int
-ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries,
+ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
const char *name, u_int address, u_int value,
u_int *cur_column, u_int wrap_point)
{
@@ -7229,7 +7801,7 @@ ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
ahc_outb(ahc, SCSIID, scsiid);
}
-void
+static void
ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
{
struct target_cmd *cmd;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_inline.h b/drivers/scsi/aic7xxx/aic7xxx_inline.h
index cba2f23bbe7..09bf2f4d78d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_inline.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_inline.h
@@ -46,179 +46,13 @@
#define _AIC7XXX_INLINE_H_
/************************* Sequencer Execution Control ************************/
-static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc);
-static __inline int ahc_is_paused(struct ahc_softc *ahc);
-static __inline void ahc_pause(struct ahc_softc *ahc);
-static __inline void ahc_unpause(struct ahc_softc *ahc);
-
-/*
- * Work around any chip bugs related to halting sequencer execution.
- * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
- * reading a register that will set this signal and deassert it.
- * Without this workaround, if the chip is paused, by an interrupt or
- * manual pause while accessing scb ram, accesses to certain registers
- * will hang the system (infinite pci retries).
- */
-static __inline void
-ahc_pause_bug_fix(struct ahc_softc *ahc)
-{
- if ((ahc->features & AHC_ULTRA2) != 0)
- (void)ahc_inb(ahc, CCSCBCTL);
-}
-
-/*
- * Determine whether the sequencer has halted code execution.
- * Returns non-zero status if the sequencer is stopped.
- */
-static __inline int
-ahc_is_paused(struct ahc_softc *ahc)
-{
- return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
-}
-
-/*
- * Request that the sequencer stop and wait, indefinitely, for it
- * to stop. The sequencer will only acknowledge that it is paused
- * once it has reached an instruction boundary and PAUSEDIS is
- * cleared in the SEQCTL register. The sequencer may use PAUSEDIS
- * for critical sections.
- */
-static __inline void
-ahc_pause(struct ahc_softc *ahc)
-{
- ahc_outb(ahc, HCNTRL, ahc->pause);
-
- /*
- * Since the sequencer can disable pausing in a critical section, we
- * must loop until it actually stops.
- */
- while (ahc_is_paused(ahc) == 0)
- ;
-
- ahc_pause_bug_fix(ahc);
-}
-
-/*
- * Allow the sequencer to continue program execution.
- * We check here to ensure that no additional interrupt
- * sources that would cause the sequencer to halt have been
- * asserted. If, for example, a SCSI bus reset is detected
- * while we are fielding a different, pausing, interrupt type,
- * we don't want to release the sequencer before going back
- * into our interrupt handler and dealing with this new
- * condition.
- */
-static __inline void
-ahc_unpause(struct ahc_softc *ahc)
-{
- if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
- ahc_outb(ahc, HCNTRL, ahc->unpause);
-}
-
-/*********************** Untagged Transaction Routines ************************/
-static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
-static __inline void ahc_release_untagged_queues(struct ahc_softc *ahc);
-
-/*
- * Block our completion routine from starting the next untagged
- * transaction for this target or target lun.
- */
-static __inline void
-ahc_freeze_untagged_queues(struct ahc_softc *ahc)
-{
- if ((ahc->flags & AHC_SCB_BTT) == 0)
- ahc->untagged_queue_lock++;
-}
-
-/*
- * Allow the next untagged transaction for this target or target lun
- * to be executed. We use a counting semaphore to allow the lock
- * to be acquired recursively. Once the count drops to zero, the
- * transaction queues will be run.
- */
-static __inline void
-ahc_release_untagged_queues(struct ahc_softc *ahc)
-{
- if ((ahc->flags & AHC_SCB_BTT) == 0) {
- ahc->untagged_queue_lock--;
- if (ahc->untagged_queue_lock == 0)
- ahc_run_untagged_queues(ahc);
- }
-}
+int ahc_is_paused(struct ahc_softc *ahc);
+void ahc_pause(struct ahc_softc *ahc);
+void ahc_unpause(struct ahc_softc *ahc);
/************************** Memory mapping routines ***************************/
-static __inline struct ahc_dma_seg *
- ahc_sg_bus_to_virt(struct scb *scb,
- uint32_t sg_busaddr);
-static __inline uint32_t
- ahc_sg_virt_to_bus(struct scb *scb,
- struct ahc_dma_seg *sg);
-static __inline uint32_t
- ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
-static __inline void ahc_sync_scb(struct ahc_softc *ahc,
- struct scb *scb, int op);
-static __inline void ahc_sync_sglist(struct ahc_softc *ahc,
- struct scb *scb, int op);
-static __inline uint32_t
- ahc_targetcmd_offset(struct ahc_softc *ahc,
- u_int index);
-
-static __inline struct ahc_dma_seg *
-ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
-{
- int sg_index;
-
- sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
- /* sg_list_phys points to entry 1, not 0 */
- sg_index++;
-
- return (&scb->sg_list[sg_index]);
-}
-
-static __inline uint32_t
-ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
-{
- int sg_index;
-
- /* sg_list_phys points to entry 1, not 0 */
- sg_index = sg - &scb->sg_list[1];
-
- return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
-}
-
-static __inline uint32_t
-ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
-{
- return (ahc->scb_data->hscb_busaddr
- + (sizeof(struct hardware_scb) * index));
-}
-
-static __inline void
-ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
-{
- ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
- ahc->scb_data->hscb_dmamap,
- /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
- /*len*/sizeof(*scb->hscb), op);
-}
-
-static __inline void
-ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
-{
- if (scb->sg_count == 0)
- return;
-
- ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
- /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
- * sizeof(struct ahc_dma_seg),
- /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
-}
-
-static __inline uint32_t
-ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
-{
- return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
-}
+void ahc_sync_sglist(struct ahc_softc *ahc,
+ struct scb *scb, int op);
/******************************** Debugging ***********************************/
static __inline char *ahc_name(struct ahc_softc *ahc);
@@ -231,420 +65,34 @@ ahc_name(struct ahc_softc *ahc)
/*********************** Miscellaneous Support Functions ***********************/
-static __inline void ahc_update_residual(struct ahc_softc *ahc,
- struct scb *scb);
-static __inline struct ahc_initiator_tinfo *
- ahc_fetch_transinfo(struct ahc_softc *ahc,
- char channel, u_int our_id,
- u_int remote_id,
- struct ahc_tmode_tstate **tstate);
-static __inline uint16_t
- ahc_inw(struct ahc_softc *ahc, u_int port);
-static __inline void ahc_outw(struct ahc_softc *ahc, u_int port,
- u_int value);
-static __inline uint32_t
- ahc_inl(struct ahc_softc *ahc, u_int port);
-static __inline void ahc_outl(struct ahc_softc *ahc, u_int port,
- uint32_t value);
-static __inline uint64_t
- ahc_inq(struct ahc_softc *ahc, u_int port);
-static __inline void ahc_outq(struct ahc_softc *ahc, u_int port,
- uint64_t value);
-static __inline struct scb*
- ahc_get_scb(struct ahc_softc *ahc);
-static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
-static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc,
- struct scb *scb);
-static __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
-static __inline struct scsi_sense_data *
- ahc_get_sense_buf(struct ahc_softc *ahc,
- struct scb *scb);
-static __inline uint32_t
- ahc_get_sense_bufaddr(struct ahc_softc *ahc,
- struct scb *scb);
-
-/*
- * Determine whether the sequencer reported a residual
- * for this SCB/transaction.
- */
-static __inline void
-ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
-{
- uint32_t sgptr;
-
- sgptr = ahc_le32toh(scb->hscb->sgptr);
- if ((sgptr & SG_RESID_VALID) != 0)
- ahc_calc_residual(ahc, scb);
-}
-
-/*
- * Return pointers to the transfer negotiation information
- * for the specified our_id/remote_id pair.
- */
-static __inline struct ahc_initiator_tinfo *
-ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
- u_int remote_id, struct ahc_tmode_tstate **tstate)
-{
- /*
- * Transfer data structures are stored from the perspective
- * of the target role. Since the parameters for a connection
- * in the initiator role to a given target are the same as
- * when the roles are reversed, we pretend we are the target.
- */
- if (channel == 'B')
- our_id += 8;
- *tstate = ahc->enabled_targets[our_id];
- return (&(*tstate)->transinfo[remote_id]);
-}
-
-static __inline uint16_t
-ahc_inw(struct ahc_softc *ahc, u_int port)
-{
- uint16_t r = ahc_inb(ahc, port+1) << 8;
- return r | ahc_inb(ahc, port);
-}
-
-static __inline void
-ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
-{
- ahc_outb(ahc, port, value & 0xFF);
- ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
-}
-
-static __inline uint32_t
-ahc_inl(struct ahc_softc *ahc, u_int port)
-{
- return ((ahc_inb(ahc, port))
- | (ahc_inb(ahc, port+1) << 8)
- | (ahc_inb(ahc, port+2) << 16)
- | (ahc_inb(ahc, port+3) << 24));
-}
-
-static __inline void
-ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
-{
- ahc_outb(ahc, port, (value) & 0xFF);
- ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
- ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
- ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
-}
-
-static __inline uint64_t
-ahc_inq(struct ahc_softc *ahc, u_int port)
-{
- return ((ahc_inb(ahc, port))
- | (ahc_inb(ahc, port+1) << 8)
- | (ahc_inb(ahc, port+2) << 16)
- | (ahc_inb(ahc, port+3) << 24)
- | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
- | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
- | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
- | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
-}
-
-static __inline void
-ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
-{
- ahc_outb(ahc, port, value & 0xFF);
- ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
- ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
- ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
- ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
- ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
- ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
- ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
-}
-
-/*
- * Get a free scb. If there are none, see if we can allocate a new SCB.
- */
-static __inline struct scb *
-ahc_get_scb(struct ahc_softc *ahc)
-{
- struct scb *scb;
-
- if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
- ahc_alloc_scbs(ahc);
- scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
- if (scb == NULL)
- return (NULL);
- }
- SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
- return (scb);
-}
-
-/*
- * Return an SCB resource to the free list.
- */
-static __inline void
-ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
-{
- struct hardware_scb *hscb;
-
- hscb = scb->hscb;
- /* Clean up for the next user */
- ahc->scb_data->scbindex[hscb->tag] = NULL;
- scb->flags = SCB_FREE;
- hscb->control = 0;
-
- SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
-
- /* Notify the OSM that a resource is now available. */
- ahc_platform_scb_free(ahc, scb);
-}
-
-static __inline struct scb *
-ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
-{
- struct scb* scb;
-
- scb = ahc->scb_data->scbindex[tag];
- if (scb != NULL)
- ahc_sync_scb(ahc, scb,
- BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
- return (scb);
-}
-
-static __inline void
-ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
-{
- struct hardware_scb *q_hscb;
- u_int saved_tag;
-
- /*
- * Our queuing method is a bit tricky. The card
- * knows in advance which HSCB to download, and we
- * can't disappoint it. To achieve this, the next
- * SCB to download is saved off in ahc->next_queued_scb.
- * When we are called to queue "an arbitrary scb",
- * we copy the contents of the incoming HSCB to the one
- * the sequencer knows about, swap HSCB pointers and
- * finally assign the SCB to the tag indexed location
- * in the scb_array. This makes sure that we can still
- * locate the correct SCB by SCB_TAG.
- */
- q_hscb = ahc->next_queued_scb->hscb;
- saved_tag = q_hscb->tag;
- memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
- if ((scb->flags & SCB_CDB32_PTR) != 0) {
- q_hscb->shared_data.cdb_ptr =
- ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
- + offsetof(struct hardware_scb, cdb32));
- }
- q_hscb->tag = saved_tag;
- q_hscb->next = scb->hscb->tag;
-
- /* Now swap HSCB pointers. */
- ahc->next_queued_scb->hscb = scb->hscb;
- scb->hscb = q_hscb;
-
- /* Now define the mapping from tag to SCB in the scbindex */
- ahc->scb_data->scbindex[scb->hscb->tag] = scb;
-}
-
-/*
- * Tell the sequencer about a new transaction to execute.
- */
-static __inline void
-ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
-{
- ahc_swap_with_next_hscb(ahc, scb);
-
- if (scb->hscb->tag == SCB_LIST_NULL
- || scb->hscb->next == SCB_LIST_NULL)
- panic("Attempt to queue invalid SCB tag %x:%x\n",
- scb->hscb->tag, scb->hscb->next);
-
- /*
- * Setup data "oddness".
- */
- scb->hscb->lun &= LID;
- if (ahc_get_transfer_length(scb) & 0x1)
- scb->hscb->lun |= SCB_XFERLEN_ODD;
-
- /*
- * Keep a history of SCBs we've downloaded in the qinfifo.
- */
- ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
-
- /*
- * Make sure our data is consistent from the
- * perspective of the adapter.
- */
- ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-
- /* Tell the adapter about the newly queued SCB */
- if ((ahc->features & AHC_QUEUE_REGS) != 0) {
- ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
- } else {
- if ((ahc->features & AHC_AUTOPAUSE) == 0)
- ahc_pause(ahc);
- ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
- if ((ahc->features & AHC_AUTOPAUSE) == 0)
- ahc_unpause(ahc);
- }
-}
-
-static __inline struct scsi_sense_data *
-ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
-{
- int offset;
-
- offset = scb - ahc->scb_data->scbarray;
- return (&ahc->scb_data->sense[offset]);
-}
-
-static __inline uint32_t
-ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
-{
- int offset;
-
- offset = scb - ahc->scb_data->scbarray;
- return (ahc->scb_data->sense_busaddr
- + (offset * sizeof(struct scsi_sense_data)));
-}
+struct ahc_initiator_tinfo *
+ ahc_fetch_transinfo(struct ahc_softc *ahc,
+ char channel, u_int our_id,
+ u_int remote_id,
+ struct ahc_tmode_tstate **tstate);
+uint16_t
+ ahc_inw(struct ahc_softc *ahc, u_int port);
+void ahc_outw(struct ahc_softc *ahc, u_int port,
+ u_int value);
+uint32_t
+ ahc_inl(struct ahc_softc *ahc, u_int port);
+void ahc_outl(struct ahc_softc *ahc, u_int port,
+ uint32_t value);
+uint64_t
+ ahc_inq(struct ahc_softc *ahc, u_int port);
+void ahc_outq(struct ahc_softc *ahc, u_int port,
+ uint64_t value);
+struct scb*
+ ahc_get_scb(struct ahc_softc *ahc);
+void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
+struct scb *
+ ahc_lookup_scb(struct ahc_softc *ahc, u_int tag);
+void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
+struct scsi_sense_data *
+ ahc_get_sense_buf(struct ahc_softc *ahc,
+ struct scb *scb);
/************************** Interrupt Processing ******************************/
-static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op);
-static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
-static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
-static __inline int ahc_intr(struct ahc_softc *ahc);
-
-static __inline void
-ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
-{
- ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
- /*offset*/0, /*len*/256, op);
-}
-
-static __inline void
-ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
-{
-#ifdef AHC_TARGET_MODE
- if ((ahc->flags & AHC_TARGETROLE) != 0) {
- ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
- ahc->shared_data_dmamap,
- ahc_targetcmd_offset(ahc, 0),
- sizeof(struct target_cmd) * AHC_TMODE_CMDS,
- op);
- }
-#endif
-}
-
-/*
- * See if the firmware has posted any completed commands
- * into our in-core command complete fifos.
- */
-#define AHC_RUN_QOUTFIFO 0x1
-#define AHC_RUN_TQINFIFO 0x2
-static __inline u_int
-ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
-{
- u_int retval;
-
- retval = 0;
- ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
- /*offset*/ahc->qoutfifonext, /*len*/1,
- BUS_DMASYNC_POSTREAD);
- if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
- retval |= AHC_RUN_QOUTFIFO;
-#ifdef AHC_TARGET_MODE
- if ((ahc->flags & AHC_TARGETROLE) != 0
- && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
- ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
- ahc->shared_data_dmamap,
- ahc_targetcmd_offset(ahc, ahc->tqinfifofnext),
- /*len*/sizeof(struct target_cmd),
- BUS_DMASYNC_POSTREAD);
- if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
- retval |= AHC_RUN_TQINFIFO;
- }
-#endif
- return (retval);
-}
-
-/*
- * Catch an interrupt from the adapter
- */
-static __inline int
-ahc_intr(struct ahc_softc *ahc)
-{
- u_int intstat;
-
- if ((ahc->pause & INTEN) == 0) {
- /*
- * Our interrupt is not enabled on the chip
- * and may be disabled for re-entrancy reasons,
- * so just return. This is likely just a shared
- * interrupt.
- */
- return (0);
- }
- /*
- * Instead of directly reading the interrupt status register,
- * infer the cause of the interrupt by checking our in-core
- * completion queues. This avoids a costly PCI bus read in
- * most cases.
- */
- if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
- && (ahc_check_cmdcmpltqueues(ahc) != 0))
- intstat = CMDCMPLT;
- else {
- intstat = ahc_inb(ahc, INTSTAT);
- }
-
- if ((intstat & INT_PEND) == 0) {
-#if AHC_PCI_CONFIG > 0
- if (ahc->unsolicited_ints > 500) {
- ahc->unsolicited_ints = 0;
- if ((ahc->chip & AHC_PCI) != 0
- && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
- ahc->bus_intr(ahc);
- }
-#endif
- ahc->unsolicited_ints++;
- return (0);
- }
- ahc->unsolicited_ints = 0;
-
- if (intstat & CMDCMPLT) {
- ahc_outb(ahc, CLRINT, CLRCMDINT);
-
- /*
- * Ensure that the chip sees that we've cleared
- * this interrupt before we walk the output fifo.
- * Otherwise, we may, due to posted bus writes,
- * clear the interrupt after we finish the scan,
- * and after the sequencer has added new entries
- * and asserted the interrupt again.
- */
- ahc_flush_device_writes(ahc);
- ahc_run_qoutfifo(ahc);
-#ifdef AHC_TARGET_MODE
- if ((ahc->flags & AHC_TARGETROLE) != 0)
- ahc_run_tqinfifo(ahc, /*paused*/FALSE);
-#endif
- }
-
- /*
- * Handle statuses that may invalidate our cached
- * copy of INTSTAT separately.
- */
- if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
- /* Hot eject. Do nothing */
- } else if (intstat & BRKADRINT) {
- ahc_handle_brkadrint(ahc);
- } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
-
- ahc_pause_bug_fix(ahc);
-
- if ((intstat & SEQINT) != 0)
- ahc_handle_seqint(ahc, intstat);
-
- if ((intstat & SCSIINT) != 0)
- ahc_handle_scsiint(ahc, intstat);
- }
- return (1);
-}
+int ahc_intr(struct ahc_softc *ahc);
#endif /* _AIC7XXX_INLINE_H_ */
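
The shape of the refactoring this hunk applies, in miniature (hypothetical names): the header keeps only a declaration, and the single definition moves out of line, where the compiler may still inline it within its own translation unit.

/* foo.h — declaration only */
int foo_count(struct foo *f);

/* foo.c — the one out-of-line definition */
int
foo_count(struct foo *f)
{
	return (f->count);
}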
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 42ad48e09f0..fd2b9785ff4 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -388,14 +388,83 @@ static int aic7xxx_setup(char *s);
static int ahc_linux_unit;
+/************************** OS Utility Wrappers *******************************/
+void
+ahc_delay(long usec)
+{
+ /*
+ * udelay on Linux can have problems for
+ * multi-millisecond waits. Wait at most
+ * 1024us per call.
+ */
+ while (usec > 0) {
+		udelay(usec > 1024 ? 1024 : usec);
+ usec -= 1024;
+ }
+}
+
+/***************************** Low Level I/O **********************************/
+uint8_t
+ahc_inb(struct ahc_softc * ahc, long port)
+{
+ uint8_t x;
+
+ if (ahc->tag == BUS_SPACE_MEMIO) {
+ x = readb(ahc->bsh.maddr + port);
+ } else {
+ x = inb(ahc->bsh.ioport + port);
+ }
+ mb();
+ return (x);
+}
+
+void
+ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
+{
+ if (ahc->tag == BUS_SPACE_MEMIO) {
+ writeb(val, ahc->bsh.maddr + port);
+ } else {
+ outb(val, ahc->bsh.ioport + port);
+ }
+ mb();
+}
+
+void
+ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ ahc_outb(ahc, port, *array++);
+}
+
+void
+ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
+{
+ int i;
+
+ /*
+ * There is probably a more efficient way to do this on Linux
+ * but we don't use this for anything speed critical and this
+ * should work.
+ */
+ for (i = 0; i < count; i++)
+ *array++ = ahc_inb(ahc, port);
+}
+
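The "more efficient way" the comments allude to would presumably be the kernel's string-I/O helpers; a hedged sketch of a bulk-read variant (the ordering the per-byte mb() in ahc_inb() provides would need auditing before substituting this):

static void
example_insb_rep(struct ahc_softc *ahc, long port, uint8_t *array, int count)
{
	if (ahc->tag == BUS_SPACE_MEMIO)
		ioread8_rep(ahc->bsh.maddr + port, array, count);
	else
		insb(ahc->bsh.ioport + port, array, count);
}
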
/********************************* Inlines ************************************/
-static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
+static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
-static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
+static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
struct ahc_dma_seg *sg,
dma_addr_t addr, bus_size_t len);
-static __inline void
+static void
ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
{
struct scsi_cmnd *cmd;
@@ -406,7 +475,7 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
scsi_dma_unmap(cmd);
}
-static __inline int
+static int
ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
{
@@ -442,13 +511,11 @@ ahc_linux_info(struct Scsi_Host *host)
bp = &buffer[0];
ahc = *(struct ahc_softc **)host->hostdata;
memset(bp, 0, sizeof(buffer));
- strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev ");
- strcat(bp, AIC7XXX_DRIVER_VERSION);
- strcat(bp, "\n");
- strcat(bp, " <");
+ strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n"
+ " <");
strcat(bp, ahc->description);
- strcat(bp, ">\n");
- strcat(bp, " ");
+ strcat(bp, ">\n"
+ " ");
ahc_controller_info(ahc, ahc_info);
strcat(bp, ahc_info);
strcat(bp, "\n");
@@ -964,7 +1031,7 @@ aic7xxx_setup(char *s)
char *p;
char *end;
- static struct {
+ static const struct {
const char *name;
uint32_t *flag;
} options[] = {
@@ -2317,7 +2384,7 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period)
unsigned int ppr_options = tinfo->goal.ppr_options;
unsigned long flags;
unsigned long offset = tinfo->goal.offset;
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
if (offset == 0)
offset = MAX_OFFSET;
@@ -2361,7 +2428,7 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
unsigned int ppr_options = 0;
unsigned int period = 0;
unsigned long flags;
- struct ahc_syncrate *syncrate = NULL;
+ const struct ahc_syncrate *syncrate = NULL;
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
@@ -2391,7 +2458,7 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
unsigned int period = tinfo->goal.period;
unsigned int width = tinfo->goal.width;
unsigned long flags;
- struct ahc_syncrate *syncrate;
+ const struct ahc_syncrate *syncrate;
if (dt && spi_max_width(starget)) {
ppr_options |= MSG_EXT_PPR_DT_REQ;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index b48dab447bd..3f7238db35e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -365,7 +365,7 @@ struct ahc_platform_data {
#define AHC_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
- uint32_t mem_busaddr; /* Mem Base Addr */
+ resource_size_t mem_busaddr; /* Mem Base Addr */
};
/************************** OS Utility Wrappers *******************************/
@@ -375,82 +375,16 @@ struct ahc_platform_data {
#define malloc(size, type, flags) kmalloc(size, flags)
#define free(ptr, type) kfree(ptr)
-static __inline void ahc_delay(long);
-static __inline void
-ahc_delay(long usec)
-{
- /*
- * udelay on Linux can have problems for
- * multi-millisecond waits. Wait at most
- * 1024us per call.
- */
- while (usec > 0) {
- udelay(usec % 1024);
- usec -= 1024;
- }
-}
+void ahc_delay(long);
/***************************** Low Level I/O **********************************/
-static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port);
-static __inline void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
-static __inline void ahc_outsb(struct ahc_softc * ahc, long port,
- uint8_t *, int count);
-static __inline void ahc_insb(struct ahc_softc * ahc, long port,
- uint8_t *, int count);
-
-static __inline uint8_t
-ahc_inb(struct ahc_softc * ahc, long port)
-{
- uint8_t x;
-
- if (ahc->tag == BUS_SPACE_MEMIO) {
- x = readb(ahc->bsh.maddr + port);
- } else {
- x = inb(ahc->bsh.ioport + port);
- }
- mb();
- return (x);
-}
-
-static __inline void
-ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
-{
- if (ahc->tag == BUS_SPACE_MEMIO) {
- writeb(val, ahc->bsh.maddr + port);
- } else {
- outb(val, ahc->bsh.ioport + port);
- }
- mb();
-}
-
-static __inline void
-ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
-{
- int i;
-
- /*
- * There is probably a more efficient way to do this on Linux
- * but we don't use this for anything speed critical and this
- * should work.
- */
- for (i = 0; i < count; i++)
- ahc_outb(ahc, port, *array++);
-}
-
-static __inline void
-ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
-{
- int i;
-
- /*
- * There is probably a more efficient way to do this on Linux
- * but we don't use this for anything speed critical and this
- * should work.
- */
- for (i = 0; i < count; i++)
- *array++ = ahc_inb(ahc, port);
-}
+uint8_t ahc_inb(struct ahc_softc * ahc, long port);
+void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
+void ahc_outsb(struct ahc_softc * ahc, long port,
+ uint8_t *, int count);
+void ahc_insb(struct ahc_softc * ahc, long port,
+ uint8_t *, int count);
/**************************** Initialization **********************************/
int ahc_linux_register_host(struct ahc_softc *,
@@ -464,9 +398,6 @@ struct info_str {
int pos;
};
-void ahc_format_transinfo(struct info_str *info,
- struct ahc_transinfo *tinfo);
-
/******************************** Locking *************************************/
/* Lock protecting internal data structures */
@@ -555,61 +486,12 @@ void ahc_linux_pci_exit(void);
int ahc_pci_map_registers(struct ahc_softc *ahc);
int ahc_pci_map_int(struct ahc_softc *ahc);
-static __inline uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
+uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
int reg, int width);
-static __inline uint32_t
-ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
-{
- switch (width) {
- case 1:
- {
- uint8_t retval;
-
- pci_read_config_byte(pci, reg, &retval);
- return (retval);
- }
- case 2:
- {
- uint16_t retval;
- pci_read_config_word(pci, reg, &retval);
- return (retval);
- }
- case 4:
- {
- uint32_t retval;
- pci_read_config_dword(pci, reg, &retval);
- return (retval);
- }
- default:
- panic("ahc_pci_read_config: Read size too big");
- /* NOTREACHED */
- return (0);
- }
-}
-
-static __inline void ahc_pci_write_config(ahc_dev_softc_t pci,
- int reg, uint32_t value,
- int width);
-
-static __inline void
-ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
-{
- switch (width) {
- case 1:
- pci_write_config_byte(pci, reg, value);
- break;
- case 2:
- pci_write_config_word(pci, reg, value);
- break;
- case 4:
- pci_write_config_dword(pci, reg, value);
- break;
- default:
- panic("ahc_pci_write_config: Write size too big");
- /* NOTREACHED */
- }
-}
+void ahc_pci_write_config(ahc_dev_softc_t pci,
+ int reg, uint32_t value,
+ int width);
static __inline int ahc_get_pci_function(ahc_dev_softc_t);
static __inline int
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 3d3eaef65fb..0d7628f1f1e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -46,7 +46,7 @@
*/
#define ID(x) ID_C(x, PCI_CLASS_STORAGE_SCSI)
-static struct pci_device_id ahc_linux_pci_id_table[] = {
+static const struct pci_device_id ahc_linux_pci_id_table[] = {
/* aic7850 based controllers */
ID(ID_AHA_2902_04_10_15_20C_30C),
/* aic7860 based controllers */
@@ -206,7 +206,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const uint64_t mask_39bit = 0x7FFFFFFFFFULL;
struct ahc_softc *ahc;
ahc_dev_softc_t pci;
- struct ahc_pci_identity *entry;
+ const struct ahc_pci_identity *entry;
char *name;
int error;
struct device *dev = &pdev->dev;
@@ -269,6 +269,57 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return (0);
}
+/******************************* PCI Routines *********************************/
+uint32_t
+ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
+{
+ switch (width) {
+ case 1:
+ {
+ uint8_t retval;
+
+ pci_read_config_byte(pci, reg, &retval);
+ return (retval);
+ }
+ case 2:
+ {
+ uint16_t retval;
+ pci_read_config_word(pci, reg, &retval);
+ return (retval);
+ }
+ case 4:
+ {
+ uint32_t retval;
+ pci_read_config_dword(pci, reg, &retval);
+ return (retval);
+ }
+ default:
+ panic("ahc_pci_read_config: Read size too big");
+ /* NOTREACHED */
+ return (0);
+ }
+}
+
+void
+ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
+{
+ switch (width) {
+ case 1:
+ pci_write_config_byte(pci, reg, value);
+ break;
+ case 2:
+ pci_write_config_word(pci, reg, value);
+ break;
+ case 4:
+ pci_write_config_dword(pci, reg, value);
+ break;
+ default:
+ panic("ahc_pci_write_config: Write size too big");
+ /* NOTREACHED */
+ }
+}
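
Typical read-modify-write usage, as the driver itself does for the PCI command register (PCIR_COMMAND and PCIM_CMD_MEMEN come from the driver's FreeBSD-style PCI compat definitions):

	u_int command;

	command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/4);
	command |= PCIM_CMD_MEMEN;
	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/4);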
+
static struct pci_driver aic7xxx_pci_driver = {
.name = "aic7xxx",
.probe = ahc_linux_pci_dev_probe,
@@ -293,7 +344,7 @@ ahc_linux_pci_exit(void)
}
static int
-ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base)
+ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base)
{
if (aic7xxx_allow_memio == 0)
return (ENOMEM);
@@ -308,10 +359,10 @@ ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base)
static int
ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
- u_long *bus_addr,
+ resource_size_t *bus_addr,
uint8_t __iomem **maddr)
{
- u_long start;
+ resource_size_t start;
int error;
error = 0;
@@ -336,7 +387,7 @@ int
ahc_pci_map_registers(struct ahc_softc *ahc)
{
uint32_t command;
- u_long base;
+ resource_size_t base;
uint8_t __iomem *maddr;
int error;
@@ -374,12 +425,12 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
} else
command |= PCIM_CMD_MEMEN;
} else {
- printf("aic7xxx: PCI%d:%d:%d MEM region 0x%lx "
+ printf("aic7xxx: PCI%d:%d:%d MEM region 0x%llx "
"unavailable. Cannot memory map device.\n",
ahc_get_pci_bus(ahc->dev_softc),
ahc_get_pci_slot(ahc->dev_softc),
ahc_get_pci_function(ahc->dev_softc),
- base);
+ (unsigned long long)base);
}
/*
@@ -390,15 +441,15 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
error = ahc_linux_pci_reserve_io_region(ahc, &base);
if (error == 0) {
ahc->tag = BUS_SPACE_PIO;
- ahc->bsh.ioport = base;
+ ahc->bsh.ioport = (u_long)base;
command |= PCIM_CMD_PORTEN;
} else {
- printf("aic7xxx: PCI%d:%d:%d IO region 0x%lx[0..255] "
+ printf("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] "
"unavailable. Cannot map device.\n",
ahc_get_pci_bus(ahc->dev_softc),
ahc_get_pci_slot(ahc->dev_softc),
ahc_get_pci_function(ahc->dev_softc),
- base);
+ (unsigned long long)base);
}
}
ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 56848f41e4f..c07cb6eebb0 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -168,8 +168,7 @@ static ahc_device_setup_t ahc_aha394XX_setup;
static ahc_device_setup_t ahc_aha494XX_setup;
static ahc_device_setup_t ahc_aha398XX_setup;
-static struct ahc_pci_identity ahc_pci_ident_table [] =
-{
+static const struct ahc_pci_identity ahc_pci_ident_table[] = {
/* aic7850 based controllers */
{
ID_AHA_2902_04_10_15_20C_30C,
@@ -668,7 +667,7 @@ ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor,
return (result);
}
-struct ahc_pci_identity *
+const struct ahc_pci_identity *
ahc_find_pci_device(ahc_dev_softc_t pci)
{
uint64_t full_id;
@@ -676,7 +675,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
uint16_t vendor;
uint16_t subdevice;
uint16_t subvendor;
- struct ahc_pci_identity *entry;
+ const struct ahc_pci_identity *entry;
u_int i;
vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
@@ -710,7 +709,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
}
int
-ahc_pci_config(struct ahc_softc *ahc, struct ahc_pci_identity *entry)
+ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
{
u_int command;
u_int our_id;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 99e5443e753..e92991a7c48 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -58,7 +58,7 @@ static int ahc_proc_write_seeprom(struct ahc_softc *ahc,
* Table of syncrates that don't follow the "divisible by 4"
* rule. This table will be expanded in future SCSI specs.
*/
-static struct {
+static const struct {
u_int period_factor;
u_int period; /* in 100ths of ns */
} scsi_syncrates[] = {
@@ -137,7 +137,7 @@ copy_info(struct info_str *info, char *fmt, ...)
return (len);
}
-void
+static void
ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
{
u_int speed;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
index 88bfd767c51..309a562b009 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -8,7 +8,7 @@
#include "aic7xxx_osm.h"
-static ahc_reg_parse_entry_t SCSISEQ_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISEQ_parse_table[] = {
{ "SCSIRSTO", 0x01, 0x01 },
{ "ENAUTOATNP", 0x02, 0x02 },
{ "ENAUTOATNI", 0x04, 0x04 },
@@ -26,7 +26,7 @@ ahc_scsiseq_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x00, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = {
+static const ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = {
{ "CLRCHN", 0x02, 0x02 },
{ "SCAMEN", 0x04, 0x04 },
{ "SPIOEN", 0x08, 0x08 },
@@ -43,7 +43,7 @@ ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x01, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
+static const ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
{ "STPWEN", 0x01, 0x01 },
{ "ACTNEGEN", 0x02, 0x02 },
{ "ENSTIMER", 0x04, 0x04 },
@@ -60,7 +60,7 @@ ahc_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x02, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
{ "ACKO", 0x01, 0x01 },
{ "REQO", 0x02, 0x02 },
{ "BSYO", 0x04, 0x04 },
@@ -85,7 +85,7 @@ ahc_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x03, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
{ "ACKI", 0x01, 0x01 },
{ "REQI", 0x02, 0x02 },
{ "BSYI", 0x04, 0x04 },
@@ -112,7 +112,7 @@ ahc_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x03, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSIRATE_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIRATE_parse_table[] = {
{ "SINGLE_EDGE", 0x10, 0x10 },
{ "ENABLE_CRC", 0x40, 0x40 },
{ "WIDEXFER", 0x80, 0x80 },
@@ -128,7 +128,7 @@ ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x04, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSIID_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIID_parse_table[] = {
{ "TWIN_CHNLB", 0x80, 0x80 },
{ "OID", 0x0f, 0x0f },
{ "TWIN_TID", 0x70, 0x70 },
@@ -151,20 +151,13 @@ ahc_scsidatl_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_scsidath_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCSIDATH",
- 0x07, regvalue, cur_col, wrap));
-}
-
-int
ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "STCNT",
0x08, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
+static const ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
{ "DIS_MSGIN_DUALEDGE", 0x01, 0x01 },
{ "AUTO_MSGOUT_DE", 0x02, 0x02 },
{ "SCSIDATL_IMGEN", 0x04, 0x04 },
@@ -190,7 +183,7 @@ ahc_targcrccnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0a, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
+static const ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
{ "CLRSPIORDY", 0x02, 0x02 },
{ "CLRSWRAP", 0x08, 0x08 },
{ "CLRIOERR", 0x08, 0x08 },
@@ -206,7 +199,7 @@ ahc_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0b, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
{ "DMADONE", 0x01, 0x01 },
{ "SPIORDY", 0x02, 0x02 },
{ "SDONE", 0x04, 0x04 },
@@ -225,7 +218,7 @@ ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0b, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
+static const ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
{ "CLRREQINIT", 0x01, 0x01 },
{ "CLRPHASECHG", 0x02, 0x02 },
{ "CLRSCSIPERR", 0x04, 0x04 },
@@ -242,7 +235,7 @@ ahc_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0c, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
{ "REQINIT", 0x01, 0x01 },
{ "PHASECHG", 0x02, 0x02 },
{ "SCSIPERR", 0x04, 0x04 },
@@ -260,7 +253,7 @@ ahc_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0c, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SSTAT2_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT2_parse_table[] = {
{ "DUAL_EDGE_ERR", 0x01, 0x01 },
{ "CRCREQERR", 0x02, 0x02 },
{ "CRCENDERR", 0x04, 0x04 },
@@ -278,7 +271,7 @@ ahc_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0d, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SSTAT3_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT3_parse_table[] = {
{ "OFFCNT", 0x0f, 0x0f },
{ "U2OFFCNT", 0x7f, 0x7f },
{ "SCSICNT", 0xf0, 0xf0 }
@@ -291,7 +284,7 @@ ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0e, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
{ "OID", 0x0f, 0x0f },
{ "TID", 0xf0, 0xf0 }
};
@@ -303,7 +296,7 @@ ahc_scsiid_ultra2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0f, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
+static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
{ "ENDMADONE", 0x01, 0x01 },
{ "ENSPIORDY", 0x02, 0x02 },
{ "ENSDONE", 0x04, 0x04 },
@@ -321,7 +314,7 @@ ahc_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x10, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SIMODE1_parse_table[] = {
+static const ahc_reg_parse_entry_t SIMODE1_parse_table[] = {
{ "ENREQINIT", 0x01, 0x01 },
{ "ENPHASECHG", 0x02, 0x02 },
{ "ENSCSIPERR", 0x04, 0x04 },
@@ -347,33 +340,13 @@ ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_scsibush_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCSIBUSH",
- 0x13, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t SXFRCTL2_parse_table[] = {
- { "CMDDMAEN", 0x08, 0x08 },
- { "AUTORSTDIS", 0x10, 0x10 },
- { "ASYNC_SETUP", 0x07, 0x07 }
-};
-
-int
-ahc_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
- 0x13, regvalue, cur_col, wrap));
-}
-
-int
ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "SHADDR",
0x14, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
+static const ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
{ "STAGE1", 0x01, 0x01 },
{ "STAGE2", 0x02, 0x02 },
{ "STAGE3", 0x04, 0x04 },
@@ -389,7 +362,7 @@ ahc_seltimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x18, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SELID_parse_table[] = {
+static const ahc_reg_parse_entry_t SELID_parse_table[] = {
{ "ONEBIT", 0x08, 0x08 },
{ "SELID_MASK", 0xf0, 0xf0 }
};
@@ -401,21 +374,6 @@ ahc_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x19, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCAMCTL_parse_table[] = {
- { "DFLTTID", 0x10, 0x10 },
- { "ALTSTIM", 0x20, 0x20 },
- { "CLRSCAMSELID", 0x40, 0x40 },
- { "ENSCAMSELO", 0x80, 0x80 },
- { "SCAMLVL", 0x03, 0x03 }
-};
-
-int
-ahc_scamctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SCAMCTL_parse_table, 5, "SCAMCTL",
- 0x1a, regvalue, cur_col, wrap));
-}
-
int
ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -423,7 +381,7 @@ ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1b, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
+static const ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
{ "SSPIOCPS", 0x01, 0x01 },
{ "ROM", 0x02, 0x02 },
{ "EEPROM", 0x04, 0x04 },
@@ -441,7 +399,7 @@ ahc_spiocap_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1b, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
{ "BRDCTL0", 0x01, 0x01 },
{ "BRDSTB_ULTRA2", 0x01, 0x01 },
{ "BRDCTL1", 0x02, 0x02 },
@@ -464,7 +422,7 @@ ahc_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1d, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SEECTL_parse_table[] = {
+static const ahc_reg_parse_entry_t SEECTL_parse_table[] = {
{ "SEEDI", 0x01, 0x01 },
{ "SEEDO", 0x02, 0x02 },
{ "SEECK", 0x04, 0x04 },
@@ -482,7 +440,7 @@ ahc_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1e, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
{ "XCVR", 0x01, 0x01 },
{ "SELWIDE", 0x02, 0x02 },
{ "ENAB20", 0x04, 0x04 },
@@ -522,13 +480,6 @@ ahc_disc_dsb_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_cmdsize_table_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "CMDSIZE_TABLE_TAIL",
- 0x34, regvalue, cur_col, wrap));
-}
-
-int
ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "MWI_RESIDUAL",
@@ -549,7 +500,7 @@ ahc_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3a, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
+static const ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
{ "FIFORESET", 0x01, 0x01 },
{ "FIFOFLUSH", 0x02, 0x02 },
{ "DIRECTION", 0x04, 0x04 },
@@ -569,7 +520,7 @@ ahc_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3b, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
{ "NO_DISCONNECT", 0x01, 0x01 },
{ "SPHASE_PENDING", 0x02, 0x02 },
{ "DPHASE_PENDING", 0x04, 0x04 },
@@ -602,7 +553,7 @@ ahc_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3e, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
+static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
{ "MSGI", 0x20, 0x20 },
{ "IOI", 0x40, 0x40 },
{ "CDI", 0x80, 0x80 },
@@ -645,13 +596,6 @@ ahc_free_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_complete_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "COMPLETE_SCBH",
- 0x43, regvalue, cur_col, wrap));
-}
-
-int
ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "HSCB_ADDR",
@@ -700,7 +644,7 @@ ahc_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x50, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t ARG_1_parse_table[] = {
+static const ahc_reg_parse_entry_t ARG_1_parse_table[] = {
{ "CONT_TARG_SESSION", 0x02, 0x02 },
{ "CONT_MSG_LOOP", 0x04, 0x04 },
{ "EXIT_MSG_LOOP", 0x08, 0x08 },
@@ -731,7 +675,7 @@ ahc_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x53, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
{ "ENAUTOATNP", 0x02, 0x02 },
{ "ENAUTOATNI", 0x04, 0x04 },
{ "ENAUTOATNO", 0x08, 0x08 },
@@ -747,7 +691,7 @@ ahc_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x54, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
+static const ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
{ "HA_274_EXTENDED_TRANS",0x01, 0x01 }
};
@@ -758,7 +702,7 @@ ahc_ha_274_biosglobal_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x56, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
{ "SCB_DMA", 0x01, 0x01 },
{ "TARGET_MSG_PENDING", 0x02, 0x02 }
};
@@ -770,7 +714,7 @@ ahc_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x57, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
{ "ENSPCHK", 0x20, 0x20 },
{ "RESET_SCSI", 0x40, 0x40 },
{ "TERM_ENB", 0x80, 0x80 },
@@ -785,7 +729,7 @@ ahc_scsiconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5a, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t INTDEF_parse_table[] = {
+static const ahc_reg_parse_entry_t INTDEF_parse_table[] = {
{ "EDGE_TRIG", 0x80, 0x80 },
{ "VECTOR", 0x0f, 0x0f }
};
@@ -804,7 +748,7 @@ ahc_hostconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5d, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
+static const ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
{ "CHANNEL_B_PRIMARY", 0x08, 0x08 },
{ "BIOSMODE", 0x30, 0x30 },
{ "BIOSDISABLED", 0x30, 0x30 }
@@ -817,7 +761,7 @@ ahc_ha_274_biosctrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5f, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
{ "LOADRAM", 0x01, 0x01 },
{ "SEQRESET", 0x02, 0x02 },
{ "STEP", 0x04, 0x04 },
@@ -849,7 +793,7 @@ ahc_seqaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x62, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
{ "SEQADDR1_MASK", 0x01, 0x01 }
};
@@ -902,7 +846,7 @@ ahc_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x6a, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t FLAGS_parse_table[] = {
+static const ahc_reg_parse_entry_t FLAGS_parse_table[] = {
{ "CARRY", 0x01, 0x01 },
{ "ZERO", 0x02, 0x02 }
};
@@ -929,13 +873,6 @@ ahc_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "FUNCTION1",
- 0x6e, regvalue, cur_col, wrap));
-}
-
-int
ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "STACK",
@@ -956,19 +893,7 @@ ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x70, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t BCTL_parse_table[] = {
- { "ENABLE", 0x01, 0x01 },
- { "ACE", 0x08, 0x08 }
-};
-
-int
-ahc_bctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(BCTL_parse_table, 2, "BCTL",
- 0x84, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
+static const ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
{ "CIOPARCKEN", 0x01, 0x01 },
{ "USCBSIZE32", 0x02, 0x02 },
{ "RAMPS", 0x04, 0x04 },
@@ -986,7 +911,7 @@ ahc_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x84, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
+static const ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
{ "BON", 0x0f, 0x0f },
{ "BOFF", 0xf0, 0xf0 }
};
@@ -998,7 +923,7 @@ ahc_bustime_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x85, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
+static const ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
{ "HADDLDSEL0", 0x01, 0x01 },
{ "HADDLDSEL1", 0x02, 0x02 },
{ "DSLATT", 0xfc, 0xfc }
@@ -1011,7 +936,7 @@ ahc_dscommand1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x85, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
+static const ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
{ "STBON", 0x07, 0x07 },
{ "STBOFF", 0x38, 0x38 },
{ "DFTHRSH_75", 0x80, 0x80 },
@@ -1026,7 +951,7 @@ ahc_busspd_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x86, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
+static const ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
{ "SEQ_MAILBOX", 0x0f, 0x0f },
{ "HOST_TQINPOS", 0x80, 0x80 },
{ "HOST_MAILBOX", 0xf0, 0xf0 }
@@ -1039,7 +964,7 @@ ahc_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x86, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
+static const ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
{ "DFTHRSH_100", 0xc0, 0xc0 }
};
@@ -1050,7 +975,7 @@ ahc_dspcistatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x86, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
+static const ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
{ "CHIPRST", 0x01, 0x01 },
{ "CHIPRSTACK", 0x01, 0x01 },
{ "INTEN", 0x02, 0x02 },
@@ -1088,7 +1013,7 @@ ahc_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x90, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
+static const ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
{ "SEQINT", 0x01, 0x01 },
{ "CMDCMPLT", 0x02, 0x02 },
{ "SCSIINT", 0x04, 0x04 },
@@ -1119,7 +1044,7 @@ ahc_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x91, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CLRINT_parse_table[] = {
+static const ahc_reg_parse_entry_t CLRINT_parse_table[] = {
{ "CLRSEQINT", 0x01, 0x01 },
{ "CLRCMDINT", 0x02, 0x02 },
{ "CLRSCSIINT", 0x04, 0x04 },
@@ -1134,7 +1059,7 @@ ahc_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x92, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t ERROR_parse_table[] = {
+static const ahc_reg_parse_entry_t ERROR_parse_table[] = {
{ "ILLHADDR", 0x01, 0x01 },
{ "ILLSADDR", 0x02, 0x02 },
{ "ILLOPCODE", 0x04, 0x04 },
@@ -1152,7 +1077,7 @@ ahc_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x92, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DFCNTRL_parse_table[] = {
+static const ahc_reg_parse_entry_t DFCNTRL_parse_table[] = {
{ "FIFORESET", 0x01, 0x01 },
{ "FIFOFLUSH", 0x02, 0x02 },
{ "DIRECTION", 0x04, 0x04 },
@@ -1172,7 +1097,7 @@ ahc_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x93, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DFSTATUS_parse_table[] = {
+static const ahc_reg_parse_entry_t DFSTATUS_parse_table[] = {
{ "FIFOEMP", 0x01, 0x01 },
{ "FIFOFULL", 0x02, 0x02 },
{ "DFTHRESH", 0x04, 0x04 },
@@ -1198,20 +1123,13 @@ ahc_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "DFRADDR",
- 0x97, regvalue, cur_col, wrap));
-}
-
-int
ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "DFDAT",
0x99, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
+static const ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
{ "SCBAUTO", 0x80, 0x80 },
{ "SCBCNT_MASK", 0x1f, 0x1f }
};
@@ -1231,20 +1149,13 @@ ahc_qinfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_qincnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "QINCNT",
- 0x9c, regvalue, cur_col, wrap));
-}
-
-int
ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "QOUTFIFO",
0x9d, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
+static const ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
{ "TARGCRCCNTEN", 0x04, 0x04 },
{ "TARGCRCENDEN", 0x08, 0x08 },
{ "CRCREQCHKEN", 0x10, 0x10 },
@@ -1260,14 +1171,7 @@ ahc_crccontrol1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x9d, regvalue, cur_col, wrap));
}
-int
-ahc_qoutcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "QOUTCNT",
- 0x9e, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
{ "DATA_OUT_PHASE", 0x01, 0x01 },
{ "DATA_IN_PHASE", 0x02, 0x02 },
{ "MSG_OUT_PHASE", 0x04, 0x04 },
@@ -1284,7 +1188,7 @@ ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x9e, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
+static const ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
{ "ALT_MODE", 0x80, 0x80 }
};
@@ -1351,7 +1255,7 @@ ahc_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xac, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
{ "SG_LAST_SEG", 0x80, 0x80 },
{ "SG_HIGH_ADDR_BITS", 0x7f, 0x7f }
};
@@ -1363,7 +1267,7 @@ ahc_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xb0, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
{ "SG_LIST_NULL", 0x01, 0x01 },
{ "SG_FULL_RESID", 0x02, 0x02 },
{ "SG_RESID_VALID", 0x04, 0x04 }
@@ -1376,7 +1280,7 @@ ahc_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xb4, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
{ "DISCONNECTED", 0x04, 0x04 },
{ "ULTRAENB", 0x08, 0x08 },
{ "MK_MESSAGE", 0x10, 0x10 },
@@ -1394,7 +1298,7 @@ ahc_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xb8, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
{ "TWIN_CHNLB", 0x80, 0x80 },
{ "OID", 0x0f, 0x0f },
{ "TWIN_TID", 0x70, 0x70 },
@@ -1408,7 +1312,7 @@ ahc_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xb9, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SCB_LUN_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_LUN_parse_table[] = {
{ "SCB_XFERLEN_ODD", 0x80, 0x80 },
{ "LID", 0x3f, 0x3f }
};
@@ -1455,14 +1359,7 @@ ahc_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xbf, regvalue, cur_col, wrap));
}
-int
-ahc_scb_64_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_64_SPARE",
- 0xc0, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
+static const ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
{ "DO_2840", 0x01, 0x01 },
{ "CK_2840", 0x02, 0x02 },
{ "CS_2840", 0x04, 0x04 }
@@ -1475,7 +1372,7 @@ ahc_seectl_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xc0, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
+static const ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
{ "DI_2840", 0x01, 0x01 },
{ "EEPROM_TF", 0x80, 0x80 },
{ "ADSEL", 0x1e, 0x1e },
@@ -1524,7 +1421,7 @@ ahc_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xea, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
{ "CCSGRESET", 0x01, 0x01 },
{ "SG_FETCH_NEEDED", 0x02, 0x02 },
{ "CCSGEN", 0x08, 0x08 },
@@ -1552,7 +1449,7 @@ ahc_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xed, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
{ "CCSCBRESET", 0x01, 0x01 },
{ "CCSCBDIR", 0x04, 0x04 },
{ "CCSCBEN", 0x08, 0x08 },
@@ -1610,7 +1507,7 @@ ahc_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xf8, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
+static const ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
{ "SDSCB_ROLLOVER", 0x10, 0x10 },
{ "SNSCB_ROLLOVER", 0x20, 0x20 },
{ "SCB_AVAIL", 0x40, 0x40 },
@@ -1625,7 +1522,7 @@ ahc_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xfa, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
+static const ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
{ "RD_DFTHRSH_MIN", 0x00, 0x00 },
{ "WR_DFTHRSH_MIN", 0x00, 0x00 },
{ "RD_DFTHRSH_25", 0x01, 0x01 },
@@ -1653,7 +1550,7 @@ ahc_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xfb, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
+static const ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
{ "LAST_SEG_DONE", 0x01, 0x01 },
{ "LAST_SEG", 0x02, 0x02 },
{ "SG_ADDR_MASK", 0xf8, 0xf8 }
@@ -1666,7 +1563,7 @@ ahc_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xfc, regvalue, cur_col, wrap));
}
-static ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
+static const ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
{ "LAST_SEG_DONE", 0x01, 0x01 },
{ "LAST_SEG", 0x02, 0x02 },
{ "SG_ADDR_MASK", 0xf8, 0xf8 }
diff --git a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
index 4cee08521e7..07e93fbae70 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
@@ -5,7 +5,7 @@
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
*/
-static uint8_t seqprog[] = {
+static const uint8_t seqprog[] = {
0xb2, 0x00, 0x00, 0x08,
0xf7, 0x11, 0x22, 0x08,
0x00, 0x65, 0xee, 0x59,
@@ -1081,7 +1081,7 @@ ahc_patch0_func(struct ahc_softc *ahc)
return (0);
}
-static struct patch {
+static const struct patch {
ahc_patch_func_t *patch_func;
uint32_t begin :10,
skip_instr :10,
@@ -1291,7 +1291,7 @@ static struct patch {
{ ahc_patch4_func, 865, 12, 1 }
};
-static struct cs {
+static const struct cs {
uint16_t begin;
uint16_t end;
} critical_sections[] = {
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 924102720b1..e4a77872030 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -362,7 +362,7 @@ output_code()
" *\n"
"%s */\n", versions);
- fprintf(ofile, "static uint8_t seqprog[] = {\n");
+ fprintf(ofile, "static const uint8_t seqprog[] = {\n");
for (cur_instr = STAILQ_FIRST(&seq_program);
cur_instr != NULL;
cur_instr = STAILQ_NEXT(cur_instr, links)) {
@@ -415,7 +415,7 @@ output_code()
}
fprintf(ofile,
-"static struct patch {\n"
+"static const struct patch {\n"
" %spatch_func_t *patch_func;\n"
" uint32_t begin :10,\n"
" skip_instr :10,\n"
@@ -435,7 +435,7 @@ output_code()
fprintf(ofile, "\n};\n\n");
fprintf(ofile,
-"static struct cs {\n"
+"static const struct cs {\n"
" uint16_t begin;\n"
" uint16_t end;\n"
"} critical_sections[] = {\n");
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 702e2dbd11f..81be6a261cc 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -101,11 +101,12 @@ static void format_3_instr(int opcode, symbol_ref_t *src,
expression_t *immed, symbol_ref_t *address);
static void test_readable_symbol(symbol_t *symbol);
static void test_writable_symbol(symbol_t *symbol);
-static void type_check(symbol_t *symbol, expression_t *expression, int and_op);
+static void type_check(symbol_ref_t *sym, expression_t *expression, int and_op);
static void make_expression(expression_t *immed, int value);
static void add_conditional(symbol_t *symbol);
static void add_version(const char *verstring);
static int is_download_const(expression_t *immed);
+static int is_location_address(symbol_t *symbol);
void yyerror(const char *string);
#define SRAM_SYMNAME "SRAM_BASE"
@@ -142,6 +143,8 @@ void yyerror(const char *string);
%token <value> T_ADDRESS
+%token T_COUNT
+
%token T_ACCESS_MODE
%token T_MODES
@@ -192,10 +195,10 @@ void yyerror(const char *string);
%token <value> T_OR
-/* 16 bit extensions */
-%token <value> T_OR16 T_AND16 T_XOR16 T_ADD16
-%token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG
-
+/* 16 bit extensions, not implemented
+ * %token <value> T_OR16 T_AND16 T_XOR16 T_ADD16
+ * %token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG
+ */
%token T_RET
%token T_NOP
@@ -214,7 +217,7 @@ void yyerror(const char *string);
%type <expression> expression immediate immediate_or_a
-%type <value> export ret f1_opcode f2_opcode f4_opcode jmp_jc_jnc_call jz_jnz je_jne
+%type <value> export ret f1_opcode f2_opcode jmp_jc_jnc_call jz_jnz je_jne
%type <value> mode_value mode_list macro_arglist
@@ -313,13 +316,13 @@ reg_definition:
stop("Register multiply defined", EX_DATAERR);
/* NOTREACHED */
}
- cur_symbol = $1;
+ cur_symbol = $1;
cur_symbol->type = cur_symtype;
initialize_symbol(cur_symbol);
}
reg_attribute_list
'}'
- {
+ {
/*
* Default to allowing everything in for registers
* with no bit or mask definitions.
@@ -349,9 +352,10 @@ reg_attribute_list:
| reg_attribute_list reg_attribute
;
-reg_attribute:
+reg_attribute:
reg_address
| size
+| count
| access_mode
| modes
| field_defn
@@ -392,6 +396,13 @@ size:
}
;
+count:
+ T_COUNT T_NUMBER
+ {
+ cur_symbol->count += $2;
+ }
+;
+
access_mode:
T_ACCESS_MODE T_MODE
{
@@ -641,14 +652,14 @@ expression:
&($1.referenced_syms),
&($3.referenced_syms));
}
-| expression T_EXPR_LSHIFT expression
+| expression T_EXPR_LSHIFT expression
{
$$.value = $1.value << $3.value;
symlist_merge(&$$.referenced_syms,
&$1.referenced_syms,
&$3.referenced_syms);
}
-| expression T_EXPR_RSHIFT expression
+| expression T_EXPR_RSHIFT expression
{
$$.value = $1.value >> $3.value;
symlist_merge(&$$.referenced_syms,
@@ -714,7 +725,7 @@ expression:
;
constant:
- T_CONST T_SYMBOL expression
+ T_CONST T_SYMBOL expression
{
if ($2->type != UNINITIALIZED) {
stop("Re-definition of symbol as a constant",
@@ -800,6 +811,7 @@ scratch_ram:
cur_symtype = SRAMLOC;
cur_symbol->type = SRAMLOC;
initialize_symbol(cur_symbol);
+ cur_symbol->count += 1;
}
reg_address
{
@@ -831,6 +843,7 @@ scb:
initialize_symbol(cur_symbol);
/* 64 bytes of SCB space */
cur_symbol->info.rinfo->size = 64;
+ cur_symbol->count += 1;
}
reg_address
{
@@ -1311,14 +1324,18 @@ f2_opcode:
| T_ROR { $$ = AIC_OP_ROR; }
;
-f4_opcode:
- T_OR16 { $$ = AIC_OP_OR16; }
-| T_AND16 { $$ = AIC_OP_AND16; }
-| T_XOR16 { $$ = AIC_OP_XOR16; }
-| T_ADD16 { $$ = AIC_OP_ADD16; }
-| T_ADC16 { $$ = AIC_OP_ADC16; }
-| T_MVI16 { $$ = AIC_OP_MVI16; }
-;
+/*
+ * 16bit opcodes, not used
+ *
+ *f4_opcode:
+ * T_OR16 { $$ = AIC_OP_OR16; }
+ *| T_AND16 { $$ = AIC_OP_AND16; }
+ *| T_XOR16 { $$ = AIC_OP_XOR16; }
+ *| T_ADD16 { $$ = AIC_OP_ADD16; }
+ *| T_ADC16 { $$ = AIC_OP_ADC16; }
+ *| T_MVI16 { $$ = AIC_OP_MVI16; }
+ *;
+ */
code:
f2_opcode destination ',' expression opt_source ret ';'
@@ -1357,6 +1374,7 @@ code:
code:
T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';'
{
+ type_check(&$2, &$4, AIC_OP_OR);
format_3_instr($5, &$2, &$4, &$6);
}
;
@@ -1528,7 +1546,7 @@ initialize_symbol(symbol_t *symbol)
sizeof(struct cond_info));
break;
case MACRO:
- symbol->info.macroinfo =
+ symbol->info.macroinfo =
(struct macro_info *)malloc(sizeof(struct macro_info));
if (symbol->info.macroinfo == NULL) {
stop("Can't create macro info", EX_SOFTWARE);
@@ -1552,7 +1570,6 @@ add_macro_arg(const char *argtext, int argnum)
struct macro_arg *marg;
int i;
int retval;
-
if (cur_symbol == NULL || cur_symbol->type != MACRO) {
stop("Invalid current symbol for adding macro arg",
@@ -1633,8 +1650,10 @@ format_1_instr(int opcode, symbol_ref_t *dest, expression_t *immed,
test_writable_symbol(dest->symbol);
test_readable_symbol(src->symbol);
- /* Ensure that immediate makes sense for this destination */
- type_check(dest->symbol, immed, opcode);
+ if (!is_location_address(dest->symbol)) {
+ /* Ensure that immediate makes sense for this destination */
+ type_check(dest, immed, opcode);
+ }
/* Allocate sequencer space for the instruction and fill it out */
instr = seq_alloc();
@@ -1766,9 +1785,6 @@ format_3_instr(int opcode, symbol_ref_t *src,
/* Test register permissions */
test_readable_symbol(src->symbol);
- /* Ensure that immediate makes sense for this source */
- type_check(src->symbol, immed, opcode);
-
/* Allocate sequencer space for the instruction and fill it out */
instr = seq_alloc();
f3_instr = &instr->format.format3;
@@ -1797,7 +1813,6 @@ format_3_instr(int opcode, symbol_ref_t *src,
static void
test_readable_symbol(symbol_t *symbol)
{
-
if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) {
snprintf(errbuf, sizeof(errbuf),
"Register %s unavailable in source reg mode %d",
@@ -1815,7 +1830,6 @@ test_readable_symbol(symbol_t *symbol)
static void
test_writable_symbol(symbol_t *symbol)
{
-
if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) {
snprintf(errbuf, sizeof(errbuf),
"Register %s unavailable in destination reg mode %d",
@@ -1831,25 +1845,34 @@ test_writable_symbol(symbol_t *symbol)
}
static void
-type_check(symbol_t *symbol, expression_t *expression, int opcode)
+type_check(symbol_ref_t *sym, expression_t *expression, int opcode)
{
+ symbol_t *symbol = sym->symbol;
symbol_node_t *node;
int and_op;
+ int8_t value, mask;
and_op = FALSE;
- if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ || opcode == AIC_OP_JZ)
- and_op = TRUE;
-
/*
* Make sure that we aren't attempting to write something
* that hasn't been defined. If this is an and operation,
* this is a mask, so "undefined" bits are okay.
*/
- if (and_op == FALSE
- && (expression->value & ~symbol->info.rinfo->valid_bitmask) != 0) {
+ if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ ||
+ opcode == AIC_OP_JZ || opcode == AIC_OP_JNE ||
+ opcode == AIC_OP_BMOV)
+ and_op = TRUE;
+
+ /*
+ * Defaulting to 8 bit logic
+ */
+ mask = (int8_t)~symbol->info.rinfo->valid_bitmask;
+ value = (int8_t)expression->value;
+
+ if (and_op == FALSE && (mask & value) != 0) {
snprintf(errbuf, sizeof(errbuf),
"Invalid bit(s) 0x%x in immediate written to %s",
- expression->value & ~symbol->info.rinfo->valid_bitmask,
+ (mask & value),
symbol->name);
stop(errbuf, EX_DATAERR);
/* NOTREACHED */
@@ -1959,3 +1982,13 @@ is_download_const(expression_t *immed)
return (FALSE);
}
+
+static int
+is_location_address(symbol_t *sym)
+{
+ if (sym->type == SCBLOC ||
+ sym->type == SRAMLOC)
+ return (TRUE);
+ return (FALSE);
+}
+
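The reworked type_check() above narrows both the immediate and the register's valid-bit mask to int8_t before comparing, matching the 8-bit sequencer registers ("Defaulting to 8 bit logic"); for and-type opcodes (AND/JNZ/JZ/JNE/BMOV) the immediate is a mask and the check is skipped. A minimal standalone sketch of the comparison itself, with names local to this example rather than aicasm's:

#include <stdio.h>
#include <stdint.h>

/* Replay the patch's 8-bit check: reject an immediate that sets bits
 * outside the register's valid_bitmask. */
static int immediate_ok(uint8_t valid_bitmask, int expr_value)
{
	int8_t mask = (int8_t)~valid_bitmask;	/* undefined bits of the register */
	int8_t value = (int8_t)expr_value;	/* truncate to register width */

	return (mask & value) == 0;
}

int main(void)
{
	/* With valid bits 0x3f: 0x40 is rejected, 0x20 is accepted. */
	printf("%d %d\n", immediate_ok(0x3f, 0x40), immediate_ok(0x3f, 0x20));
	return 0;
}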
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 7c3983f868a..2c7f02daf88 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -162,6 +162,7 @@ register { return T_REGISTER; }
const { yylval.value = FALSE; return T_CONST; }
download { return T_DOWNLOAD; }
address { return T_ADDRESS; }
+count { return T_COUNT; }
access_mode { return T_ACCESS_MODE; }
modes { return T_MODES; }
RW|RO|WO {
@@ -228,15 +229,15 @@ ret { return T_RET; }
nop { return T_NOP; }
/* ARP2 16bit extensions */
-or16 { return T_OR16; }
-and16 { return T_AND16; }
-xor16 { return T_XOR16; }
-add16 { return T_ADD16; }
-adc16 { return T_ADC16; }
-mvi16 { return T_MVI16; }
-test16 { return T_TEST16; }
-cmp16 { return T_CMP16; }
-cmpxchg { return T_CMPXCHG; }
+ /* or16 { return T_OR16; } */
+ /* and16 { return T_AND16; }*/
+ /* xor16 { return T_XOR16; }*/
+ /* add16 { return T_ADD16; }*/
+ /* adc16 { return T_ADC16; }*/
+ /* mvi16 { return T_MVI16; }*/
+ /* test16 { return T_TEST16; }*/
+ /* cmp16 { return T_CMP16; }*/
+ /* cmpxchg { return T_CMPXCHG; }*/
/* Allowed Symbols */
\<\< { return T_EXPR_LSHIFT; }
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index f1f448dff56..fcd357872b4 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -77,6 +77,7 @@ symbol_create(char *name)
if (new_symbol->name == NULL)
stop("Unable to strdup symbol name", EX_SOFTWARE);
new_symbol->type = UNINITIALIZED;
+ new_symbol->count = 1;
return (new_symbol);
}
@@ -198,6 +199,12 @@ symtable_get(char *name)
}
}
memcpy(&stored_ptr, data.data, sizeof(stored_ptr));
+ stored_ptr->count++;
+ data.data = &stored_ptr;
+ if (symtable->put(symtable, &key, &data, /*flags*/0) != 0) {
+ perror("Symtable put failed");
+ exit(EX_SOFTWARE);
+ }
return (stored_ptr);
}
@@ -256,7 +263,7 @@ symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
&& (curnode->symbol->info.finfo->value >
newnode->symbol->info.finfo->value))))
|| (!field && (curnode->symbol->info.rinfo->address >
- newnode->symbol->info.rinfo->address))) {
+ newnode->symbol->info.rinfo->address))) {
SLIST_INSERT_HEAD(symlist, newnode, links);
return;
}
@@ -271,7 +278,7 @@ symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
cursymbol = SLIST_NEXT(curnode, links)->symbol;
if ((field
- && (cursymbol->type > symbol->type
+ && (cursymbol->type > symbol->type
|| (cursymbol->type == symbol->type
&& (cursymbol->info.finfo->value >
symbol->info.finfo->value))))
@@ -351,7 +358,7 @@ aic_print_reg_dump_types(FILE *ofile)
{
if (ofile == NULL)
return;
-
+
fprintf(ofile,
"typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n"
"typedef struct %sreg_parse_entry {\n"
@@ -370,7 +377,7 @@ aic_print_reg_dump_start(FILE *dfile, symbol_node_t *regnode)
return;
fprintf(dfile,
-"static %sreg_parse_entry_t %s_parse_table[] = {\n",
+"static const %sreg_parse_entry_t %s_parse_table[] = {\n",
prefix,
regnode->symbol->name);
}
@@ -385,7 +392,7 @@ aic_print_reg_dump_end(FILE *ofile, FILE *dfile,
lower_name = strdup(regnode->symbol->name);
if (lower_name == NULL)
stop("Unable to strdup symbol name", EX_SOFTWARE);
-
+
for (letter = lower_name; *letter != '\0'; letter++)
*letter = tolower(*letter);
@@ -472,6 +479,7 @@ symtable_dump(FILE *ofile, FILE *dfile)
DBT key;
DBT data;
int flag;
+ int reg_count = 0, reg_used = 0;
u_int i;
if (symtable == NULL)
@@ -541,6 +549,9 @@ symtable_dump(FILE *ofile, FILE *dfile)
int num_entries;
num_entries = 0;
+ reg_count++;
+ if (curnode->symbol->count == 1)
+ break;
fields = &curnode->symbol->info.rinfo->fields;
SLIST_FOREACH(fieldnode, fields, links) {
if (num_entries == 0)
@@ -553,11 +564,14 @@ symtable_dump(FILE *ofile, FILE *dfile)
}
aic_print_reg_dump_end(ofile, dfile,
curnode, num_entries);
+ reg_used++;
}
default:
break;
}
}
+ fprintf(stderr, "%s: %d of %d register definitions used\n", appname,
+ reg_used, reg_count);
/* Fold in the masks and bits */
while (SLIST_FIRST(&masks) != NULL) {
@@ -646,7 +660,6 @@ symtable_dump(FILE *ofile, FILE *dfile)
free(curnode);
}
-
fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n");
for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) {
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index afc22e8b490..05190c1a2fb 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -128,6 +128,7 @@ typedef struct expression_info {
typedef struct symbol {
char *name;
symtype type;
+ int count;
union {
struct reg_info *rinfo;
struct field_info *finfo;
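The count plumbing spread across aicasm_symbol.{c,h} and the new T_COUNT attribute amounts to a simple reference count: symbol_create() starts every symbol at 1, each symtable_get() lookup bumps it, and symtable_dump() skips the debug parse table for any register still at 1 (defined but never referenced), printing the "N of M register definitions used" summary. A toy model of that bookkeeping, with hypothetical names:

#include <string.h>

struct sym {
	const char *name;
	int count;		/* starts at 1, as in symbol_create() */
};

/* Each successful lookup bumps the count, as symtable_get() now does. */
static struct sym *lookup(struct sym *tab, int n, const char *name)
{
	for (int i = 0; i < n; i++) {
		if (strcmp(tab[i].name, name) == 0) {
			tab[i].count++;
			return &tab[i];
		}
	}
	return NULL;
}

/* A register whose count never left 1 was only ever defined, so its
 * generated parse table can be omitted from the debug output. */
static int parse_table_needed(const struct sym *s)
{
	return s->count > 1;
}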
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 8be3d76656f..a73a6bbb1b2 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -2286,17 +2286,14 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec,
}
}
-static irqreturn_t ihdlr(int irq, struct Scsi_Host *shost)
+static irqreturn_t ihdlr(struct Scsi_Host *shost)
{
struct scsi_cmnd *SCpnt;
unsigned int i, k, c, status, tstatus, reg;
struct mssp *spp;
struct mscp *cpp;
struct hostdata *ha = (struct hostdata *)shost->hostdata;
-
- if (shost->irq != irq)
- panic("%s: ihdlr, irq %d, shost->irq %d.\n", ha->board_name, irq,
- shost->irq);
+ int irq = shost->irq;
/* Check if this board need to be serviced */
if (!(inb(shost->io_port + REG_AUX_STATUS) & IRQ_ASSERTED))
@@ -2535,7 +2532,7 @@ static irqreturn_t ihdlr(int irq, struct Scsi_Host *shost)
return IRQ_NONE;
}
-static irqreturn_t do_interrupt_handler(int irq, void *shap)
+static irqreturn_t do_interrupt_handler(int dummy, void *shap)
{
struct Scsi_Host *shost;
unsigned int j;
@@ -2548,7 +2545,7 @@ static irqreturn_t do_interrupt_handler(int irq, void *shap)
shost = sh[j];
spin_lock_irqsave(shost->host_lock, spin_flags);
- ret = ihdlr(irq, shost);
+ ret = ihdlr(shost);
spin_unlock_irqrestore(shost->host_lock, spin_flags);
return ret;
}
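The eata.c change above is the usual shared-IRQ refactor: once the handler derives everything from the cookie registered with request_irq(), the irq argument carries no information and the old cross-check panic becomes unreachable by construction. A sketch of the resulting shape, assuming kernel context; the "example" names are illustrative:

#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

static irqreturn_t example_handler(int dummy, void *cookie)
{
	struct Scsi_Host *shost = cookie;	/* the cookie given to request_irq() */
	int irq = shost->irq;			/* authoritative copy, no cross-check */

	(void)irq;				/* only needed for diagnostics */
	/* ... service the board, return IRQ_NONE if it wasn't ours ... */
	return IRQ_HANDLED;
}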
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index bfdee596889..a0b6d414953 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -978,7 +978,7 @@ static int esp_check_spur_intr(struct esp *esp)
*/
if (!esp->ops->dma_error(esp)) {
printk(KERN_ERR PFX "esp%d: Spurious irq, "
- "sreg=%x.\n",
+ "sreg=%02x.\n",
esp->host->unique_id, esp->sreg);
return -1;
}
@@ -1447,6 +1447,9 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
if (offset > 15)
goto do_reject;
+ if (esp->flags & ESP_FLAG_DISABLE_SYNC)
+ offset = 0;
+
if (offset) {
int rounded_up, one_clock;
@@ -1697,7 +1700,12 @@ again:
else
ent->flags &= ~ESP_CMD_FLAG_WRITE;
- dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
+ if (esp->ops->dma_length_limit)
+ dma_len = esp->ops->dma_length_limit(esp, dma_addr,
+ dma_len);
+ else
+ dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
+
esp->data_dma_len = dma_len;
if (!dma_len) {
@@ -1761,7 +1769,6 @@ again:
esp_advance_dma(esp, ent, cmd, bytes_sent);
esp_event(esp, ESP_EVENT_CHECK_PHASE);
goto again;
- break;
}
case ESP_EVENT_STATUS: {
@@ -2235,7 +2242,7 @@ static void esp_bootup_reset(struct esp *esp)
static void esp_set_clock_params(struct esp *esp)
{
- int fmhz;
+ int fhz;
u8 ccf;
/* This is getting messy but it has to be done correctly or else
@@ -2270,9 +2277,9 @@ static void esp_set_clock_params(struct esp *esp)
* This entails the smallest and largest sync period we could ever
* handle on this ESP.
*/
- fmhz = esp->cfreq;
+ fhz = esp->cfreq;
- ccf = ((fmhz / 1000000) + 4) / 5;
+ ccf = ((fhz / 1000000) + 4) / 5;
if (ccf == 1)
ccf = 2;
@@ -2281,16 +2288,16 @@ static void esp_set_clock_params(struct esp *esp)
* been unable to find the clock-frequency PROM property. All
* other machines provide useful values it seems.
*/
- if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
- fmhz = 20000000;
+ if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
+ fhz = 20000000;
ccf = 4;
}
esp->cfact = (ccf == 8 ? 0 : ccf);
- esp->cfreq = fmhz;
- esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
+ esp->cfreq = fhz;
+ esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
esp->ctick = ESP_TICK(ccf, esp->ccycle);
- esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
+ esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
esp->sync_defp = SYNC_DEFP_SLOW;
}
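The fmhz to fhz rename above is purely a units fix: esp->cfreq already holds Hz, so the old name (and ESP_MHZ_TO_CYCLE) misstated the macro's argument. A worked example of the same arithmetic, assuming the 25 MHz input clock that mac_esp later in this patch reports for Quadra machines:

#include <stdio.h>

#define ESP_HZ_TO_CYCLE(hertz)	((1000000000) / ((hertz) / 1000))

int main(void)
{
	int fhz = 25000000;			/* Hz, as the renamed variable holds */
	int ccf = ((fhz / 1000000) + 4) / 5;	/* = 5, the clock conversion factor */
	int ccycle = ESP_HZ_TO_CYCLE(fhz);	/* = 40000, a 40 ns period in ps */

	printf("ccf=%d ccycle=%d\n", ccf, ccycle);
	return 0;
}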
@@ -2382,6 +2389,12 @@ static int esp_slave_configure(struct scsi_device *dev)
struct esp_target_data *tp = &esp->target[dev->id];
int goal_tags, queue_depth;
+ if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
+ /* Bypass async domain validation */
+ dev->ppr = 0;
+ dev->sdtr = 0;
+ }
+
goal_tags = 0;
if (dev->tagged_supported) {
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index d5576d54ce7..bb43a138818 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -224,7 +224,7 @@
#define ESP_TIMEO_CONST 8192
#define ESP_NEG_DEFP(mhz, cfact) \
((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
-#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
+#define ESP_HZ_TO_CYCLE(hertz) ((1000000000) / ((hertz) / 1000))
#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
/* For slow to medium speed input clock rates we shoot for 5mb/s, but for high
@@ -240,9 +240,9 @@ struct esp_cmd_priv {
int num_sg;
} u;
- unsigned int cur_residue;
+ int cur_residue;
struct scatterlist *cur_sg;
- unsigned int tot_residue;
+ int tot_residue;
};
#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
@@ -368,6 +368,12 @@ struct esp_driver_ops {
*/
int (*irq_pending)(struct esp *esp);
+ /* Return the maximum allowable size of a DMA transfer for a
+ * given buffer.
+ */
+ u32 (*dma_length_limit)(struct esp *esp, u32 dma_addr,
+ u32 dma_len);
+
/* Reset the DMA engine entirely. On return, ESP interrupts
* should be enabled. Often the interrupt enabling is
* controlled in the DMA engine.
@@ -471,6 +477,7 @@ struct esp {
#define ESP_FLAG_DOING_SLOWCMD 0x00000004
#define ESP_FLAG_WIDE_CAPABLE 0x00000008
#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
+#define ESP_FLAG_DISABLE_SYNC 0x00000020
u8 select_state;
#define ESP_SELECT_NONE 0x00 /* Not selecting */
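The new esp_driver_ops member above is optional: as the esp_scsi.c hunk shows, the core prefers ops->dma_length_limit() when a driver supplies one and otherwise falls back to the generic esp_dma_length_limit(). A sketch of a driver-side implementation, restating the 16-bit cap mac_esp registers later in this patch; kernel context assumed and the function name is illustrative:

/* Cap a transfer at the chip's 16-bit transfer counter. */
static u32 example_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}

Drivers without such a limit simply leave the member NULL and get the generic behavior.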
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index c264a8c5f01..3690360d7a7 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -199,9 +199,13 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
if (!shost->can_queue) {
printk(KERN_ERR "%s: can_queue = 0 no longer supported\n",
sht->name);
- goto out;
+ goto fail;
}
+ error = scsi_setup_command_freelist(shost);
+ if (error)
+ goto fail;
+
if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus;
@@ -255,6 +259,8 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
out_del_gendev:
device_del(&shost->shost_gendev);
out:
+ scsi_destroy_command_freelist(shost);
+ fail:
return error;
}
EXPORT_SYMBOL(scsi_add_host);
@@ -284,6 +290,11 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost);
}
+struct device_type scsi_host_type = {
+ .name = "scsi_host",
+ .release = scsi_host_dev_release,
+};
+
/**
* scsi_host_alloc - register a scsi host adapter instance.
* @sht: pointer to scsi host template
@@ -376,33 +387,31 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->dma_boundary = 0xffffffff;
- rval = scsi_setup_command_freelist(shost);
- if (rval)
- goto fail_kfree;
-
device_initialize(&shost->shost_gendev);
snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d",
shost->host_no);
- shost->shost_gendev.release = scsi_host_dev_release;
+#ifndef CONFIG_SYSFS_DEPRECATED
+ shost->shost_gendev.bus = &scsi_bus_type;
+#endif
+ shost->shost_gendev.type = &scsi_host_type;
device_initialize(&shost->shost_dev);
shost->shost_dev.parent = &shost->shost_gendev;
shost->shost_dev.class = &shost_class;
snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d",
shost->host_no);
+ shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
if (IS_ERR(shost->ehandler)) {
rval = PTR_ERR(shost->ehandler);
- goto fail_destroy_freelist;
+ goto fail_kfree;
}
scsi_proc_hostdir_add(shost->hostt);
return shost;
- fail_destroy_freelist:
- scsi_destroy_command_freelist(shost);
fail_kfree:
kfree(shost);
return NULL;
@@ -496,7 +505,7 @@ void scsi_exit_hosts(void)
int scsi_is_host_device(const struct device *dev)
{
- return dev->release == scsi_host_dev_release;
+ return dev->type == &scsi_host_type;
}
EXPORT_SYMBOL(scsi_is_host_device);
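scsi_is_host_device() above switches from comparing release callbacks to comparing device_type pointers, which keeps working once the release function moves into the type. The same idiom for any device class, sketched with hypothetical "widget" names:

#include <linux/device.h>

static struct device_type widget_type = {
	.name	= "widget",
	/* .release would go here, as scsi_host_type now carries it */
};

static int is_widget_device(const struct device *dev)
{
	return dev->type == &widget_type;
}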
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 93c3fc20aa5..32553639ade 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -258,8 +258,7 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
- hwif->OUTB(WIN_IDLEIMMEDIATE,
- hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr);
rq->errors++;
@@ -393,7 +392,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
printk ("ide-scsi: %s: DMA complete\n", drive->name);
#endif /* IDESCSI_DEBUG_LOG */
pc->xferred = pc->req_xfer;
- (void) HWIF(drive)->ide_dma_end(drive);
+ (void)hwif->dma_ops->dma_end(drive);
}
/* Clear the interrupt */
@@ -410,9 +409,9 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
idescsi_end_request (drive, 1, 0);
return ide_stopped;
}
- bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
- hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
+ hwif->INB(hwif->io_ports.lbam_addr);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if (ireason & CD) {
printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n");
@@ -485,7 +484,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
"initiated yet DRQ isn't asserted\n");
return startstop;
}
- ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+ ireason = hwif->INB(hwif->io_ports.nsect_addr);
if ((ireason & CD) == 0 || (ireason & IO)) {
printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while "
"issuing a packet command\n");
@@ -498,7 +497,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
drive->hwif->atapi_output_bytes(drive, scsi->pc->c, 12);
if (pc->flags & PC_FLAG_DMA_OK) {
pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
- hwif->dma_start(drive);
+ hwif->dma_ops->dma_start(drive);
}
return ide_started;
}
@@ -560,7 +559,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
if (drive->using_dma && !idescsi_map_sg(drive, pc)) {
hwif->sg_mapped = 1;
- dma = !hwif->dma_setup(drive);
+ dma = !hwif->dma_ops->dma_setup(drive);
hwif->sg_mapped = 0;
}
@@ -575,7 +574,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
return ide_started;
} else {
/* Issue the packet command */
- hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
+ hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
return idescsi_transfer_pc(drive);
}
}
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 5d231015bb2..b2d481dd375 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -217,11 +217,15 @@ static int __devexit esp_jazz_remove(struct platform_device *dev)
return 0;
}
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:jazz_esp");
+
static struct platform_driver esp_jazz_driver = {
.probe = esp_jazz_probe,
.remove = __devexit_p(esp_jazz_remove),
.driver = {
.name = "jazz_esp",
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a9fbb3f8865..960baaf11fb 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -182,8 +182,8 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
}
static ssize_t
-lpfc_state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -936,7 +936,7 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
-static DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL);
static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@@ -1666,7 +1666,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_fwrev,
&dev_attr_hdw,
&dev_attr_option_rom_version,
- &dev_attr_state,
+ &dev_attr_link_state,
&dev_attr_num_discovered_ports,
&dev_attr_lpfc_drvr_version,
&dev_attr_lpfc_temp_sensor,
@@ -1714,7 +1714,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_info,
- &dev_attr_state,
+ &dev_attr_link_state,
&dev_attr_num_discovered_ports,
&dev_attr_lpfc_drvr_version,
&dev_attr_lpfc_log_verbose,
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
new file mode 100644
index 00000000000..cd37bd69a11
--- /dev/null
+++ b/drivers/scsi/mac_esp.c
@@ -0,0 +1,657 @@
+/* mac_esp.c: ESP front-end for Macintosh Quadra systems.
+ *
+ * Adapted from jazz_esp.c and the old mac_esp.c.
+ *
+ * The pseudo DMA algorithm is based on the one used in NetBSD.
+ * See sys/arch/mac68k/obio/esp.c for some background information.
+ *
+ * Copyright (C) 2007-2008 Finn Thain
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/nubus.h>
+
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include <asm/macints.h>
+#include <asm/macintosh.h>
+
+#include <scsi/scsi_host.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME "mac_esp"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_VERSION "1.000"
+#define DRV_MODULE_RELDATE "Sept 15, 2007"
+
+#define MAC_ESP_IO_BASE 0x50F00000
+#define MAC_ESP_REGS_QUADRA (MAC_ESP_IO_BASE + 0x10000)
+#define MAC_ESP_REGS_QUADRA2 (MAC_ESP_IO_BASE + 0xF000)
+#define MAC_ESP_REGS_QUADRA3 (MAC_ESP_IO_BASE + 0x18000)
+#define MAC_ESP_REGS_SPACING 0x402
+#define MAC_ESP_PDMA_REG 0xF9800024
+#define MAC_ESP_PDMA_REG_SPACING 0x4
+#define MAC_ESP_PDMA_IO_OFFSET 0x100
+
+#define esp_read8(REG) mac_esp_read8(esp, REG)
+#define esp_write8(VAL, REG) mac_esp_write8(esp, VAL, REG)
+
+struct mac_esp_priv {
+ struct esp *esp;
+ void __iomem *pdma_regs;
+ void __iomem *pdma_io;
+ int error;
+};
+static struct platform_device *internal_esp, *external_esp;
+
+#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
+ platform_get_drvdata((struct platform_device *) \
+ (esp->dev)))
+
+static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+ nubus_writeb(val, esp->regs + reg * 16);
+}
+
+static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
+{
+ return nubus_readb(esp->regs + reg * 16);
+}
+
+/* For pseudo DMA and PIO we need the virtual address,
+ * so this address mapping is the identity mapping.
+ */
+
+static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
+ size_t sz, int dir)
+{
+ return (dma_addr_t)buf;
+}
+
+static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ int i;
+
+ for (i = 0; i < num_sg; i++)
+ sg[i].dma_address = (u32)sg_virt(&sg[i]);
+ return num_sg;
+}
+
+static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_reset_dma(struct esp *esp)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_dma_drain(struct esp *esp)
+{
+ /* Nothing to do. */
+}
+
+static void mac_esp_dma_invalidate(struct esp *esp)
+{
+ /* Nothing to do. */
+}
+
+static int mac_esp_dma_error(struct esp *esp)
+{
+ return MAC_ESP_GET_PRIV(esp)->error;
+}
+
+static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ int i = 500000;
+
+ do {
+ if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES))
+ return 0;
+
+ if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
+ esp_read8(ESP_STATUS));
+ mep->error = 1;
+ return 1;
+}
+
+static inline int mac_esp_wait_for_dreq(struct esp *esp)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ int i = 500000;
+
+ do {
+ if (mep->pdma_regs == NULL) {
+ if (mac_irq_pending(IRQ_MAC_SCSIDRQ))
+ return 0;
+ } else {
+ if (nubus_readl(mep->pdma_regs) & 0x200)
+ return 0;
+ }
+
+ if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
+ esp_read8(ESP_STATUS));
+ mep->error = 1;
+ return 1;
+}
+
+#define MAC_ESP_PDMA_LOOP(operands) \
+ asm volatile ( \
+ " tstw %2 \n" \
+ " jbeq 20f \n" \
+ "1: movew " operands " \n" \
+ "2: movew " operands " \n" \
+ "3: movew " operands " \n" \
+ "4: movew " operands " \n" \
+ "5: movew " operands " \n" \
+ "6: movew " operands " \n" \
+ "7: movew " operands " \n" \
+ "8: movew " operands " \n" \
+ "9: movew " operands " \n" \
+ "10: movew " operands " \n" \
+ "11: movew " operands " \n" \
+ "12: movew " operands " \n" \
+ "13: movew " operands " \n" \
+ "14: movew " operands " \n" \
+ "15: movew " operands " \n" \
+ "16: movew " operands " \n" \
+ " subqw #1,%2 \n" \
+ " jbne 1b \n" \
+ "20: tstw %3 \n" \
+ " jbeq 30f \n" \
+ "21: movew " operands " \n" \
+ " subqw #1,%3 \n" \
+ " jbne 21b \n" \
+ "30: tstw %4 \n" \
+ " jbeq 40f \n" \
+ "31: moveb " operands " \n" \
+ "32: nop \n" \
+ "40: \n" \
+ " \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .long 1b,40b \n" \
+ " .long 2b,40b \n" \
+ " .long 3b,40b \n" \
+ " .long 4b,40b \n" \
+ " .long 5b,40b \n" \
+ " .long 6b,40b \n" \
+ " .long 7b,40b \n" \
+ " .long 8b,40b \n" \
+ " .long 9b,40b \n" \
+ " .long 10b,40b \n" \
+ " .long 11b,40b \n" \
+ " .long 12b,40b \n" \
+ " .long 13b,40b \n" \
+ " .long 14b,40b \n" \
+ " .long 15b,40b \n" \
+ " .long 16b,40b \n" \
+ " .long 21b,40b \n" \
+ " .long 31b,40b \n" \
+ " .long 32b,40b \n" \
+ " .previous \n" \
+ : "+a" (addr) \
+ : "a" (mep->pdma_io), "r" (count32), "r" (count2), "g" (esp_count))
+
+static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ mep->error = 0;
+
+ if (!write)
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW);
+ esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED);
+
+ scsi_esp_cmd(esp, cmd);
+
+ do {
+ unsigned int count32 = esp_count >> 5;
+ unsigned int count2 = (esp_count & 0x1F) >> 1;
+ unsigned int start_addr = addr;
+
+ if (mac_esp_wait_for_dreq(esp))
+ break;
+
+ if (write) {
+ MAC_ESP_PDMA_LOOP("%1@,%0@+");
+
+ esp_count -= addr - start_addr;
+ } else {
+ unsigned int n;
+
+ MAC_ESP_PDMA_LOOP("%0@+,%1@");
+
+ if (mac_esp_wait_for_empty_fifo(esp))
+ break;
+
+ n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW);
+ addr = start_addr + esp_count - n;
+ esp_count = n;
+ }
+ } while (esp_count);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Programmed IO routines follow.
+ */
+
+static inline int mac_esp_wait_for_fifo(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)
+ return 0;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
+ esp_read8(ESP_STATUS));
+ return 1;
+}
+
+static inline int mac_esp_wait_for_intr(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ esp->sreg = esp_read8(ESP_STATUS);
+ if (esp->sreg & ESP_STAT_INTR)
+ return 0;
+
+ udelay(2);
+ } while (--i);
+
+ printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
+ return 1;
+}
+
+#define MAC_ESP_PIO_LOOP(operands, reg1) \
+ asm volatile ( \
+ "1: moveb " operands " \n" \
+ " subqw #1,%1 \n" \
+ " jbne 1b \n" \
+ : "+a" (addr), "+r" (reg1) \
+ : "a" (fifo))
+
+#define MAC_ESP_PIO_FILL(operands, reg1) \
+ asm volatile ( \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " moveb " operands " \n" \
+ " subqw #8,%1 \n" \
+ " subqw #8,%1 \n" \
+ : "+a" (addr), "+r" (reg1) \
+ : "a" (fifo))
+
+#define MAC_ESP_FIFO_SIZE 16
+
+static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ unsigned long flags;
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+ u8 *fifo = esp->regs + ESP_FDATA * 16;
+
+ local_irq_save(flags);
+
+ cmd &= ~ESP_CMD_DMA;
+ mep->error = 0;
+
+ if (write) {
+ scsi_esp_cmd(esp, cmd);
+
+ if (!mac_esp_wait_for_intr(esp)) {
+ if (mac_esp_wait_for_fifo(esp))
+ esp_count = 0;
+ } else {
+ esp_count = 0;
+ }
+ } else {
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ if (esp_count >= MAC_ESP_FIFO_SIZE)
+ MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
+ else
+ MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
+
+ scsi_esp_cmd(esp, cmd);
+ }
+
+ while (esp_count) {
+ unsigned int n;
+
+ if (mac_esp_wait_for_intr(esp)) {
+ mep->error = 1;
+ break;
+ }
+
+ if (esp->sreg & ESP_STAT_SPAM) {
+ printk(KERN_ERR PFX "gross error\n");
+ mep->error = 1;
+ break;
+ }
+
+ n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+ if (write) {
+ if (n > esp_count)
+ n = esp_count;
+ esp_count -= n;
+
+ MAC_ESP_PIO_LOOP("%2@,%0@+", n);
+
+ if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP)
+ break;
+
+ if (esp_count) {
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & ESP_INTR_DC)
+ break;
+
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ }
+ } else {
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & ESP_INTR_DC)
+ break;
+
+ n = MAC_ESP_FIFO_SIZE - n;
+ if (n > esp_count)
+ n = esp_count;
+
+ if (n == MAC_ESP_FIFO_SIZE) {
+ MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
+ } else {
+ esp_count -= n;
+ MAC_ESP_PIO_LOOP("%0@+,%2@", n);
+ }
+
+ scsi_esp_cmd(esp, ESP_CMD_TI);
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+static int mac_esp_irq_pending(struct esp *esp)
+{
+ if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+ return 0;
+}
+
+static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
+{
+ return dma_len > 0xFFFF ? 0xFFFF : dma_len;
+}
+
+static struct esp_driver_ops mac_esp_ops = {
+ .esp_write8 = mac_esp_write8,
+ .esp_read8 = mac_esp_read8,
+ .map_single = mac_esp_map_single,
+ .map_sg = mac_esp_map_sg,
+ .unmap_single = mac_esp_unmap_single,
+ .unmap_sg = mac_esp_unmap_sg,
+ .irq_pending = mac_esp_irq_pending,
+ .dma_length_limit = mac_esp_dma_length_limit,
+ .reset_dma = mac_esp_reset_dma,
+ .dma_drain = mac_esp_dma_drain,
+ .dma_invalidate = mac_esp_dma_invalidate,
+ .send_dma_cmd = mac_esp_send_pdma_cmd,
+ .dma_error = mac_esp_dma_error,
+};
+
+static int __devinit esp_mac_probe(struct platform_device *dev)
+{
+ struct scsi_host_template *tpnt = &scsi_esp_template;
+ struct Scsi_Host *host;
+ struct esp *esp;
+ int err;
+ int chips_present;
+ struct mac_esp_priv *mep;
+
+ if (!MACH_IS_MAC)
+ return -ENODEV;
+
+ switch (macintosh_config->scsi_type) {
+ case MAC_SCSI_QUADRA:
+ case MAC_SCSI_QUADRA3:
+ chips_present = 1;
+ break;
+ case MAC_SCSI_QUADRA2:
+ if ((macintosh_config->ident == MAC_MODEL_Q900) ||
+ (macintosh_config->ident == MAC_MODEL_Q950))
+ chips_present = 2;
+ else
+ chips_present = 1;
+ break;
+ default:
+ chips_present = 0;
+ }
+
+ if (dev->id + 1 > chips_present)
+ return -ENODEV;
+
+ host = scsi_host_alloc(tpnt, sizeof(struct esp));
+
+ err = -ENOMEM;
+ if (!host)
+ goto fail;
+
+ host->max_id = 8;
+ host->use_clustering = DISABLE_CLUSTERING;
+ esp = shost_priv(host);
+
+ esp->host = host;
+ esp->dev = dev;
+
+ esp->command_block = kzalloc(16, GFP_KERNEL);
+ if (!esp->command_block)
+ goto fail_unlink;
+ esp->command_block_dma = (dma_addr_t)esp->command_block;
+
+ esp->scsi_id = 7;
+ host->this_id = esp->scsi_id;
+ esp->scsi_id_mask = 1 << esp->scsi_id;
+
+ mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL);
+ if (!mep)
+ goto fail_free_command_block;
+ mep->esp = esp;
+ platform_set_drvdata(dev, mep);
+
+ switch (macintosh_config->scsi_type) {
+ case MAC_SCSI_QUADRA:
+ esp->cfreq = 16500000;
+ esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA;
+ mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
+ mep->pdma_regs = NULL;
+ break;
+ case MAC_SCSI_QUADRA2:
+ esp->cfreq = 25000000;
+ esp->regs = (void __iomem *)(MAC_ESP_REGS_QUADRA2 +
+ dev->id * MAC_ESP_REGS_SPACING);
+ mep->pdma_io = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
+ mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG +
+ dev->id * MAC_ESP_PDMA_REG_SPACING);
+ nubus_writel(0x1d1, mep->pdma_regs);
+ break;
+ case MAC_SCSI_QUADRA3:
+ /* These Quadras have a real DMA controller (the PSC), but we
+ * don't know how to drive it, so we must use PIO instead.
+ */
+ esp->cfreq = 25000000;
+ esp->regs = (void __iomem *)MAC_ESP_REGS_QUADRA3;
+ mep->pdma_io = NULL;
+ mep->pdma_regs = NULL;
+ break;
+ }
+
+ esp->ops = &mac_esp_ops;
+ if (mep->pdma_io == NULL) {
+ printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
+ esp_write8(0, ESP_TCLOW);
+ esp_write8(0, ESP_TCMED);
+ esp->flags = ESP_FLAG_DISABLE_SYNC;
+ mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
+ } else {
+ printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
+ }
+
+ host->irq = IRQ_MAC_SCSI;
+ err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Mac ESP",
+ esp);
+ if (err < 0)
+ goto fail_free_priv;
+
+ err = scsi_esp_register(esp, &dev->dev);
+ if (err)
+ goto fail_free_irq;
+
+ return 0;
+
+fail_free_irq:
+ free_irq(host->irq, esp);
+fail_free_priv:
+ kfree(mep);
+fail_free_command_block:
+ kfree(esp->command_block);
+fail_unlink:
+ scsi_host_put(host);
+fail:
+ return err;
+}
+
+static int __devexit esp_mac_remove(struct platform_device *dev)
+{
+ struct mac_esp_priv *mep = platform_get_drvdata(dev);
+ struct esp *esp = mep->esp;
+ unsigned int irq = esp->host->irq;
+
+ scsi_esp_unregister(esp);
+
+ free_irq(irq, esp);
+
+ kfree(mep);
+
+ kfree(esp->command_block);
+
+ scsi_host_put(esp->host);
+
+ return 0;
+}
+
+static struct platform_driver esp_mac_driver = {
+ .probe = esp_mac_probe,
+ .remove = __devexit_p(esp_mac_remove),
+ .driver = {
+ .name = DRV_MODULE_NAME,
+ },
+};
+
+static int __init mac_esp_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&esp_mac_driver);
+ if (err)
+ return err;
+
+ internal_esp = platform_device_alloc(DRV_MODULE_NAME, 0);
+ if (internal_esp && platform_device_add(internal_esp)) {
+ platform_device_put(internal_esp);
+ internal_esp = NULL;
+ }
+
+ external_esp = platform_device_alloc(DRV_MODULE_NAME, 1);
+ if (external_esp && platform_device_add(external_esp)) {
+ platform_device_put(external_esp);
+ external_esp = NULL;
+ }
+
+ if (internal_esp || external_esp) {
+ return 0;
+ } else {
+ platform_driver_unregister(&esp_mac_driver);
+ return -ENOMEM;
+ }
+}
+
+static void __exit mac_esp_exit(void)
+{
+ platform_driver_unregister(&esp_mac_driver);
+
+ if (internal_esp) {
+ platform_device_unregister(internal_esp);
+ internal_esp = NULL;
+ }
+ if (external_esp) {
+ platform_device_unregister(external_esp);
+ external_esp = NULL;
+ }
+}
+
+MODULE_DESCRIPTION("Mac ESP SCSI driver");
+MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
+MODULE_LICENSE("GPLv2");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(mac_esp_init);
+module_exit(mac_esp_exit);
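For reference, the transfer carving done by mac_esp_send_pdma_cmd() above, replayed in plain C: MAC_ESP_PDMA_LOOP issues a run of 32-byte bursts (sixteen movew moves each), then single word moves, then at most one trailing odd byte, and the count32/count2 values come straight from esp_count. A minimal sketch of that arithmetic; count1 is named here only for the example:

#include <stdio.h>

int main(void)
{
	unsigned int esp_count = 77;			/* any transfer size */
	unsigned int count32 = esp_count >> 5;		/* 32-byte bursts: 2 */
	unsigned int count2 = (esp_count & 0x1F) >> 1;	/* word moves: 6 */
	unsigned int count1 = esp_count & 1;		/* odd tail byte: 1 */

	printf("%u = %u*32 + %u*2 + %u\n",
	       count32 * 32 + count2 * 2 + count1,
	       count32, count2, count1);
	return 0;
}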
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d61df036910..287690853ca 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -609,8 +609,8 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-qla2x00_state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
int len = 0;
@@ -814,7 +814,7 @@ static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
-static DEVICE_ATTR(state, S_IRUGO, qla2x00_state_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
qla2x00_zio_timer_store);
@@ -838,7 +838,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_model_name,
&dev_attr_model_desc,
&dev_attr_pci_info,
- &dev_attr_state,
+ &dev_attr_link_state,
&dev_attr_zio,
&dev_attr_zio_timer,
&dev_attr_beacon,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9d12d9f2620..cbef785765c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -38,78 +38,38 @@ qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
}
static int
-qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
- uint32_t cram_size, uint32_t *ext_mem, void **nxt)
+qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
+ uint32_t ram_dwords, void **nxt)
{
int rval;
- uint32_t cnt, stat, timer, risc_address, ext_mem_cnt;
- uint16_t mb[4];
+ uint32_t cnt, stat, timer, dwords, idx;
+ uint16_t mb0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ dma_addr_t dump_dma = ha->gid_list_dma;
+ uint32_t *dump = (uint32_t *)ha->gid_list;
rval = QLA_SUCCESS;
- risc_address = ext_mem_cnt = 0;
- memset(mb, 0, sizeof(mb));
+ mb0 = 0;
- /* Code RAM. */
- risc_address = 0x20000;
- WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
+ WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- for (cnt = 0; cnt < cram_size / 4 && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
- WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
- RD_REG_WORD(&reg->mailbox8);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
-
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->host_status);
- if (stat & HSRX_RISC_INT) {
- stat &= 0xff;
+ dwords = GID_LIST_SIZE / 4;
+ for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+ cnt += dwords, addr += dwords) {
+ if (cnt + dwords > ram_dwords)
+ dwords = ram_dwords - cnt;
- if (stat == 0x1 || stat == 0x2 ||
- stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
+ WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+ WRT_REG_WORD(&reg->mailbox8, MSW(addr));
- mb[0] = RD_REG_WORD(&reg->mailbox0);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+ WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+ WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
- WRT_REG_DWORD(&reg->hccr,
- HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
- break;
- }
-
- /* Clear this intr; it wasn't a mailbox intr */
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
- }
- udelay(5);
- }
-
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb[0] & MBS_MASK;
- code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
- } else {
- rval = QLA_FUNCTION_FAILED;
- }
- }
-
- if (rval == QLA_SUCCESS) {
- /* External Memory. */
- risc_address = 0x100000;
- ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
- WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
- clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- }
- for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
- WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
- RD_REG_WORD(&reg->mailbox8);
+ WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+ WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
for (timer = 6000000; timer; timer--) {
@@ -123,9 +83,7 @@ qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags);
- mb[0] = RD_REG_WORD(&reg->mailbox0);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ mb0 = RD_REG_WORD(&reg->mailbox0);
WRT_REG_DWORD(&reg->hccr,
HCCRX_CLR_RISC_INT);
@@ -141,17 +99,34 @@ qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
}
if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb[0] & MBS_MASK;
- ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]);
+ rval = mb0 & MBS_MASK;
+ for (idx = 0; idx < dwords; idx++)
+ ram[cnt + idx] = swab32(dump[idx]);
} else {
rval = QLA_FUNCTION_FAILED;
}
}
- *nxt = rval == QLA_SUCCESS ? &ext_mem[cnt]: NULL;
+ *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
return rval;
}
+static int
+qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
+ uint32_t cram_size, void **nxt)
+{
+ int rval;
+
+ /* Code RAM. */
+ rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ /* External Memory. */
+ return qla24xx_dump_ram(ha, 0x100000, *nxt,
+ ha->fw_memory_size - 0x100000 + 1, nxt);
+}
+
static uint32_t *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
uint32_t count, uint32_t *buf)
@@ -239,6 +214,90 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
return rval;
}
+static int
+qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
+ uint16_t ram_words, void **nxt)
+{
+ int rval;
+ uint32_t cnt, stat, timer, words, idx;
+ uint16_t mb0;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ dma_addr_t dump_dma = ha->gid_list_dma;
+ uint16_t *dump = (uint16_t *)ha->gid_list;
+
+ rval = QLA_SUCCESS;
+ mb0 = 0;
+
+ WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ words = GID_LIST_SIZE / 2;
+ for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
+ cnt += words, addr += words) {
+ if (cnt + words > ram_words)
+ words = ram_words - cnt;
+
+ WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
+ WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
+
+ WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
+ WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
+ WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
+ WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
+
+ WRT_MAILBOX_REG(ha, reg, 4, words);
+ WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+
+ for (timer = 6000000; timer; timer--) {
+ /* Check for pending interrupts. */
+ stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ if (stat & HSR_RISC_INT) {
+ stat &= 0xff;
+
+ if (stat == 0x1 || stat == 0x2) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_MAILBOX_REG(ha, reg, 0);
+
+ /* Release mailbox registers. */
+ WRT_REG_WORD(&reg->semaphore, 0);
+ WRT_REG_WORD(&reg->hccr,
+ HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ break;
+ } else if (stat == 0x10 || stat == 0x11) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb0 = RD_MAILBOX_REG(ha, reg, 0);
+
+ WRT_REG_WORD(&reg->hccr,
+ HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ break;
+ }
+
+ /* clear this intr; it wasn't a mailbox intr */
+ WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+ RD_REG_WORD(&reg->hccr);
+ }
+ udelay(5);
+ }
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ rval = mb0 & MBS_MASK;
+ for (idx = 0; idx < words; idx++)
+ ram[cnt + idx] = swab16(dump[idx]);
+ } else {
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
+ return rval;
+}
+
static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
uint16_t *buf)
@@ -258,19 +317,14 @@ void
qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
{
int rval;
- uint32_t cnt, timer;
- uint32_t risc_address;
- uint16_t mb0, mb2;
+ uint32_t cnt;
- uint32_t stat;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint16_t __iomem *dmp_reg;
unsigned long flags;
struct qla2300_fw_dump *fw;
- uint32_t data_ram_cnt;
+ void *nxt;
- risc_address = data_ram_cnt = 0;
- mb0 = mb2 = 0;
flags = 0;
if (!hardware_locked)
@@ -388,185 +442,23 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
}
}
- if (rval == QLA_SUCCESS) {
- /* Get RISC SRAM. */
- risc_address = 0x800;
- WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
- clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- }
- for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_MAILBOX_REG(ha, reg, 1, (uint16_t)risc_address);
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
-
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
- if (stat & HSR_RISC_INT) {
- stat &= 0xff;
-
- if (stat == 0x1 || stat == 0x2) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- /* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- } else if (stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- }
-
- /* clear this intr; it wasn't a mailbox intr */
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- }
- udelay(5);
- }
-
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb0 & MBS_MASK;
- fw->risc_ram[cnt] = htons(mb2);
- } else {
- rval = QLA_FUNCTION_FAILED;
- }
- }
-
- if (rval == QLA_SUCCESS) {
- /* Get stack SRAM. */
- risc_address = 0x10000;
- WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
- clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- }
- for (cnt = 0; cnt < sizeof(fw->stack_ram) / 2 && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
- WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
-
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
- if (stat & HSR_RISC_INT) {
- stat &= 0xff;
-
- if (stat == 0x1 || stat == 0x2) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- /* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- } else if (stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- }
-
- /* clear this intr; it wasn't a mailbox intr */
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- }
- udelay(5);
- }
-
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb0 & MBS_MASK;
- fw->stack_ram[cnt] = htons(mb2);
- } else {
- rval = QLA_FUNCTION_FAILED;
- }
- }
-
- if (rval == QLA_SUCCESS) {
- /* Get data SRAM. */
- risc_address = 0x11000;
- data_ram_cnt = ha->fw_memory_size - risc_address + 1;
- WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
- clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- }
- for (cnt = 0; cnt < data_ram_cnt && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
- WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
-
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
- if (stat & HSR_RISC_INT) {
- stat &= 0xff;
-
- if (stat == 0x1 || stat == 0x2) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- /* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- } else if (stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
-
- mb0 = RD_MAILBOX_REG(ha, reg, 0);
- mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
- WRT_REG_WORD(&reg->hccr,
- HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- break;
- }
+ /* Get RISC SRAM. */
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
+ sizeof(fw->risc_ram) / 2, &nxt);
- /* clear this intr; it wasn't a mailbox intr */
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
- }
- udelay(5);
- }
+ /* Get stack SRAM. */
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
+ sizeof(fw->stack_ram) / 2, &nxt);
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb0 & MBS_MASK;
- fw->data_ram[cnt] = htons(mb2);
- } else {
- rval = QLA_FUNCTION_FAILED;
- }
- }
+ /* Get data SRAM. */
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
+ ha->fw_memory_size - 0x11000 + 1, &nxt);
if (rval == QLA_SUCCESS)
- qla2xxx_copy_queues(ha, &fw->data_ram[cnt]);
+ qla2xxx_copy_queues(ha, nxt);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
@@ -1010,7 +902,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
goto qla24xx_fw_dump_failed_0;
rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
- fw->ext_mem, &nxt);
+ &nxt);
if (rval != QLA_SUCCESS)
goto qla24xx_fw_dump_failed_0;
@@ -1318,7 +1210,7 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
goto qla25xx_fw_dump_failed_0;
rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
- fw->ext_mem, &nxt);
+ &nxt);
if (rval != QLA_SUCCESS)
goto qla25xx_fw_dump_failed_0;
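
The qla_dbg.c rework above replaces the word-at-a-time MBC_READ_RAM_EXTENDED polling loops with MBC_DUMP_RISC_RAM_EXTENDED block transfers into the gid_list DMA buffer, then byte-swaps each chunk into the dump. The chunking arithmetic is the standard buffer-limited copy loop; a standalone sketch, with read_block() as a hypothetical stand-in for the firmware mailbox command:

    #include <stdint.h>

    /* Copy 'total' dwords starting at 'addr' through a bounce buffer of
     * 'bufsz' dwords, shrinking the final chunk; mirrors the loop in
     * qla24xx_dump_ram() above. */
    static int dump_ram(uint32_t addr, uint32_t *out, uint32_t total,
                        uint32_t bufsz,
                        int (*read_block)(uint32_t addr, uint32_t *dst,
                                          uint32_t len))
    {
            uint32_t cnt, chunk = bufsz;

            for (cnt = 0; cnt < total; cnt += chunk, addr += chunk) {
                    if (cnt + chunk > total)
                            chunk = total - cnt;    /* short final chunk */
                    if (read_block(addr, out + cnt, chunk))
                            return -1;
            }
            return 0;
    }
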
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 078f2a15f40..cf194517400 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1036,22 +1036,6 @@ struct mid_db_entry_24xx {
uint8_t reserved_1;
};
- /*
- * Virtual Fabric ID type definition.
- */
-typedef struct vf_id {
- uint16_t id : 12;
- uint16_t priority : 4;
-} vf_id_t;
-
-/*
- * Virtual Fabric HopCt type definition.
- */
-typedef struct vf_hopct {
- uint16_t reserved : 8;
- uint16_t hopct : 8;
-} vf_hopct_t;
-
/*
* Virtual Port Control IOCB
*/
@@ -1082,10 +1066,10 @@ struct vp_ctrl_entry_24xx {
uint8_t vp_idx_map[16];
uint16_t flags;
- struct vf_id id;
+ uint16_t id;
uint16_t reserved_4;
- struct vf_hopct hopct;
- uint8_t reserved_5[8];
+ uint16_t hopct;
+ uint8_t reserved_5[24];
};
/*
@@ -1132,9 +1116,9 @@ struct vp_config_entry_24xx {
uint16_t reserved_vp2;
uint8_t port_name_idx2[WWN_SIZE];
uint8_t node_name_idx2[WWN_SIZE];
- struct vf_id id;
+ uint16_t id;
uint16_t reserved_4;
- struct vf_hopct hopct;
+ uint16_t hopct;
uint8_t reserved_5;
};
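
Dropping the vf_id_t/vf_hopct_t bitfield typedefs in favour of plain uint16_t fields is the portable choice for a firmware wire format: C leaves bitfield ordering and packing implementation-defined, whereas explicit masks and shifts are unambiguous across compilers and architectures. A sketch of equivalent accessors for the old 12/4 split (the macro names are invented here):

    #include <stdint.h>

    #define VF_ID_MASK         0x0fffu   /* low 12 bits: id */
    #define VF_PRIORITY_SHIFT  12        /* high 4 bits: priority */

    static inline uint16_t vf_id(uint16_t v)
    {
            return v & VF_ID_MASK;
    }

    static inline uint16_t vf_priority(uint16_t v)
    {
            return v >> VF_PRIORITY_SHIFT;
    }
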
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 76eb4fecce6..f8827068d30 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -152,10 +152,6 @@ extern int
qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
extern int
-qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t,
- uint32_t);
-
-extern int
qla2x00_abort_command(scsi_qla_host_t *, srb_t *);
extern int
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 750d7ef83aa..4cb80b476c8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1583,8 +1583,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
eiter->len = __constant_cpu_to_be16(4 + 4);
max_frame_size = IS_FWI2_CAPABLE(ha) ?
- (uint32_t) icb24->frame_payload_size:
- (uint32_t) ha->init_cb->frame_payload_size;
+ le16_to_cpu(icb24->frame_payload_size) :
+ le16_to_cpu(ha->init_cb->frame_payload_size);
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
size += 4 + 4;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 01e26087c1d..bbbc5a632a1 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3645,7 +3645,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
if (le16_to_cpu(nv->login_timeout) < 4)
nv->login_timeout = __constant_cpu_to_le16(4);
ha->login_timeout = le16_to_cpu(nv->login_timeout);
- icb->login_timeout = cpu_to_le16(nv->login_timeout);
+ icb->login_timeout = nv->login_timeout;
/* Set minimum RATOV to 100 tenths of a second. */
ha->r_a_tov = 100;
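
This hunk and the qla_gs.c one above fix the two classic endianness mistakes with little-endian wire data on a big-endian host: converting twice (cpu_to_le16() applied to a value that is already little-endian) and not converting at all (a bare integer cast instead of le16_to_cpu()). The rule of thumb, sketched with hypothetical field names in kernel types:

    #include <linux/types.h>

    /* nv_timeout is wire format (__le16) as read from NVRAM, and the ICB
     * field is also wire format; only the CPU-order working copy needs a
     * swap.  A sketch, not the driver's actual helpers: */
    static void copy_timeout(__le16 *icb_timeout, __le16 nv_timeout,
                             u16 *cpu_copy)
    {
            *cpu_copy    = le16_to_cpu(nv_timeout); /* wire -> CPU: swap */
            *icb_timeout = nv_timeout;              /* wire -> wire: as-is */
    }
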
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 285479b62d8..5d9a64a7879 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -409,6 +409,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
}
set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
ha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
@@ -454,8 +455,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
ha->flags.management_server_logged_in = 0;
ha->link_data_rate = PORT_SPEED_UNKNOWN;
- if (ql2xfdmienable)
- set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
break;
@@ -511,6 +510,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
}
set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
ha->flags.gpsc_supported = 1;
ha->flags.management_server_logged_in = 0;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7d0a8a4c771..21006042080 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -681,7 +681,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
* Context:
* Kernel context.
*/
-int
+static int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
dma_addr_t phys_addr, size_t size, uint32_t tov)
{
@@ -784,7 +784,6 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
ha->host_no, rval));
} else {
- sp->flags |= SRB_ABORT_PENDING;
DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
ha->host_no));
}
@@ -1469,7 +1468,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
- lg->vp_index = cpu_to_le16(ha->vp_idx);
+ lg->vp_index = ha->vp_idx;
rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
@@ -1724,7 +1723,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
- lg->vp_index = cpu_to_le16(ha->vp_idx);
+ lg->vp_index = ha->vp_idx;
rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
@@ -2210,7 +2209,6 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
- sp->flags |= SRB_ABORT_PENDING;
}
dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2644,12 +2642,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
struct vp_rpt_id_entry_24xx *rptid_entry)
{
uint8_t vp_idx;
+ uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
scsi_qla_host_t *vha;
if (rptid_entry->entry_status != 0)
return;
- if (rptid_entry->entry_status != __constant_cpu_to_le16(CS_COMPLETE))
- return;
if (rptid_entry->format == 0) {
DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
@@ -2659,17 +2656,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]));
} else if (rptid_entry->format == 1) {
- vp_idx = LSB(rptid_entry->vp_idx);
+ vp_idx = LSB(stat);
DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
"- status %d - "
"with port id %02x%02x%02x\n",__func__,ha->host_no,
- vp_idx, MSB(rptid_entry->vp_idx),
+ vp_idx, MSB(stat),
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]));
if (vp_idx == 0)
return;
- if (MSB(rptid_entry->vp_idx) == 1)
+ if (MSB(stat) == 1)
return;
list_for_each_entry(vha, &ha->vp_list, vp_list)
@@ -2982,8 +2979,8 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
/* We update the firmware with only one data sequence. */
options |= VCO_END_OF_DATA;
- retry = 0;
do {
+ retry = 0;
memset(mn, 0, sizeof(*mn));
mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
mn->p.req.entry_count = 1;
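
Moving retry = 0 inside the do/while in qla84xx_verify_chip() makes each pass start with the flag cleared, so only the completion statuses that explicitly request another attempt can re-arm it; with the reset outside the loop, a value carried over from a previous pass could skew the next decision. The shape of the pattern, schematically (helper and status names hypothetical):

    int retry;

    do {
            retry = 0;                          /* fresh decision each pass */
            status = issue_verify_iocb();       /* hypothetical helper */
            if (status == STATUS_NEEDS_RETRY)   /* hypothetical status */
                    retry = 1;
    } while (retry);
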
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8b33b163b1d..3223fd16bcf 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -67,7 +67,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
-int ql2xfdmienable;
+int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registratons "
@@ -2135,7 +2135,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
kfree(ha->nvram);
}
-struct qla_work_evt *
+static struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
int locked)
{
@@ -2152,7 +2152,7 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
return e;
}
-int
+static int
qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
{
unsigned long flags;
@@ -2373,7 +2373,7 @@ qla2x00_do_dpc(void *data)
} else {
fcport->login_retry = 0;
}
- if (fcport->login_retry == 0)
+ if (fcport->login_retry == 0 && status != QLA_SUCCESS)
fcport->loop_id = FC_NO_LOOP_ID;
}
if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
@@ -2599,6 +2599,10 @@ qla2x00_timer(scsi_qla_host_t *ha)
start_dpc++;
}
+ /* Process any deferred work. */
+ if (!list_empty(&ha->work_list))
+ start_dpc++;
+
/* Schedule the DPC routine if needed */
if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
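
Two small behavioural changes sit in this file: ql2xfdmienable now defaults to on, and the periodic timer also wakes the DPC thread whenever the deferred-work list is non-empty, so queued events (such as the qla2x00_post_aen_work() calls above) are serviced promptly. For reference, the module-parameter pattern being adjusted looks like this in isolation (hypothetical module knob):

    #include <linux/module.h>
    #include <linux/moduleparam.h>
    #include <linux/stat.h>

    static int my_enable = 1;   /* on by default, like ql2xfdmienable now */
    module_param(my_enable, int, S_IRUGO);
    MODULE_PARM_DESC(my_enable, "Enable the optional feature (default: 1)");
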
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index f42f17acf2c..afeae2bfe7e 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.02.01-k1"
+#define QLA2XXX_VERSION "8.02.01-k2"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 3f34e9376b0..b33e72516ef 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -121,6 +121,7 @@ extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
extern struct bus_type scsi_bus_type;
+extern struct attribute_group *scsi_sysfs_shost_attr_groups[];
/* scsi_netlink.c */
#ifdef CONFIG_SCSI_NETLINK
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index ed395154a5b..3a1c99d5c77 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -190,10 +190,14 @@ void scsi_proc_host_rm(struct Scsi_Host *shost)
*/
static int proc_print_scsidevice(struct device *dev, void *data)
{
- struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_device *sdev;
struct seq_file *s = data;
int i;
+ if (!scsi_is_sdev_device(dev))
+ goto out;
+
+ sdev = to_scsi_device(dev);
seq_printf(s,
"Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ",
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
@@ -230,6 +234,7 @@ static int proc_print_scsidevice(struct device *dev, void *data)
else
seq_printf(s, "\n");
+out:
return 0;
}
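
The scsi_is_sdev_device() guard is needed because, with the target rework later in this series, scsi_target devices share the same bus iterators as scsi_device objects; any callback that blindly casts with to_scsi_device() would misinterpret a target entry. The generic shape of the guard, as a sketch:

    /* Visitor over a mixed device list: type-check before downcasting,
     * as proc_print_scsidevice() now does above.  Body elided. */
    static int my_visitor(struct device *dev, void *data)
    {
            struct scsi_device *sdev;

            if (!scsi_is_sdev_device(dev))
                    return 0;               /* e.g. a scsi_target node */

            sdev = to_scsi_device(dev);
            /* ... operate on sdev ... */
            return 0;
    }
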
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e67c14e31ba..fcd7455ffc3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -322,6 +322,21 @@ out:
return NULL;
}
+static void scsi_target_destroy(struct scsi_target *starget)
+{
+ struct device *dev = &starget->dev;
+ struct Scsi_Host *shost = dev_to_shost(dev->parent);
+ unsigned long flags;
+
+ transport_destroy_device(dev);
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->hostt->target_destroy)
+ shost->hostt->target_destroy(starget);
+ list_del_init(&starget->siblings);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ put_device(dev);
+}
+
static void scsi_target_dev_release(struct device *dev)
{
struct device *parent = dev->parent;
@@ -331,9 +346,14 @@ static void scsi_target_dev_release(struct device *dev)
put_device(parent);
}
+struct device_type scsi_target_type = {
+ .name = "scsi_target",
+ .release = scsi_target_dev_release,
+};
+
int scsi_is_target_device(const struct device *dev)
{
- return dev->release == scsi_target_dev_release;
+ return dev->type == &scsi_target_type;
}
EXPORT_SYMBOL(scsi_is_target_device);
@@ -391,14 +411,17 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
device_initialize(dev);
starget->reap_ref = 1;
dev->parent = get_device(parent);
- dev->release = scsi_target_dev_release;
sprintf(dev->bus_id, "target%d:%d:%d",
shost->host_no, channel, id);
+#ifndef CONFIG_SYSFS_DEPRECATED
+ dev->bus = &scsi_bus_type;
+#endif
+ dev->type = &scsi_target_type;
starget->id = id;
starget->channel = channel;
INIT_LIST_HEAD(&starget->siblings);
INIT_LIST_HEAD(&starget->devices);
- starget->state = STARGET_RUNNING;
+ starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
retry:
spin_lock_irqsave(shost->host_lock, flags);
@@ -411,18 +434,6 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
spin_unlock_irqrestore(shost->host_lock, flags);
/* allocate and add */
transport_setup_device(dev);
- error = device_add(dev);
- if (error) {
- dev_err(dev, "target device_add failed, error %d\n", error);
- spin_lock_irqsave(shost->host_lock, flags);
- list_del_init(&starget->siblings);
- spin_unlock_irqrestore(shost->host_lock, flags);
- transport_destroy_device(dev);
- put_device(parent);
- kfree(starget);
- return NULL;
- }
- transport_add_device(dev);
if (shost->hostt->target_alloc) {
error = shost->hostt->target_alloc(starget);
@@ -430,9 +441,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
/* don't want scsi_target_reap to do the final
* put because it will be under the host lock */
- get_device(dev);
- scsi_target_reap(starget);
- put_device(dev);
+ scsi_target_destroy(starget);
return NULL;
}
}
@@ -459,18 +468,10 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
{
struct scsi_target *starget =
container_of(work, struct scsi_target, ew.work);
- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
- unsigned long flags;
transport_remove_device(&starget->dev);
device_del(&starget->dev);
- transport_destroy_device(&starget->dev);
- spin_lock_irqsave(shost->host_lock, flags);
- if (shost->hostt->target_destroy)
- shost->hostt->target_destroy(starget);
- list_del_init(&starget->siblings);
- spin_unlock_irqrestore(shost->host_lock, flags);
- put_device(&starget->dev);
+ scsi_target_destroy(starget);
}
/**
@@ -485,21 +486,25 @@ void scsi_target_reap(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
unsigned long flags;
+ enum scsi_target_state state;
+ int empty;
spin_lock_irqsave(shost->host_lock, flags);
+ state = starget->state;
+ empty = --starget->reap_ref == 0 &&
+ list_empty(&starget->devices) ? 1 : 0;
+ spin_unlock_irqrestore(shost->host_lock, flags);
- if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
- BUG_ON(starget->state == STARGET_DEL);
- starget->state = STARGET_DEL;
- spin_unlock_irqrestore(shost->host_lock, flags);
- execute_in_process_context(scsi_target_reap_usercontext,
- &starget->ew);
+ if (!empty)
return;
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
-
- return;
+ BUG_ON(state == STARGET_DEL);
+ starget->state = STARGET_DEL;
+ if (state == STARGET_CREATED)
+ scsi_target_destroy(starget);
+ else
+ execute_in_process_context(scsi_target_reap_usercontext,
+ &starget->ew);
}
/**
@@ -1048,8 +1053,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
scsi_inq_str(vend, result, 8, 16),
scsi_inq_str(mod, result, 16, 32));
});
+
}
-
+
res = SCSI_SCAN_TARGET_PRESENT;
goto out_free_result;
}
@@ -1489,7 +1495,6 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
if (scsi_host_scan_allowed(shost))
scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
mutex_unlock(&shost->scan_mutex);
- transport_configure_device(&starget->dev);
scsi_target_reap(starget);
put_device(&starget->dev);
@@ -1570,7 +1575,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
out_reap:
/* now determine if the target has any children at all
* and if not, nuke it */
- transport_configure_device(&starget->dev);
scsi_target_reap(starget);
put_device(&starget->dev);
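
The net effect of the scan changes is a lazy-registration state machine: targets are allocated in STARGET_CREATED, device_add() is deferred until the first scsi_device is added (see scsi_target_add() in the sysfs hunks below), and scsi_target_reap() can tear down a never-added target directly instead of going through device_del() in process context. The decision structure, reduced to a runnable sketch with invented names:

    #include <stdio.h>

    enum tgt_state { TGT_CREATED, TGT_RUNNING, TGT_DEL };

    struct tgt { enum tgt_state state; };

    static void tgt_first_use(struct tgt *t)
    {
            if (t->state == TGT_CREATED)
                    t->state = TGT_RUNNING;   /* maps to device_add() */
    }

    static void tgt_reap(struct tgt *t)
    {
            enum tgt_state old = t->state;

            t->state = TGT_DEL;
            if (old == TGT_CREATED)
                    puts("destroy directly: never visible in sysfs");
            else
                    puts("defer full teardown to process context");
    }

    int main(void)
    {
            struct tgt a = { TGT_CREATED }, b = { TGT_CREATED };

            tgt_first_use(&b);        /* b gets a device; a never does */
            tgt_reap(&a);
            tgt_reap(&b);
            return 0;
    }
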
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 67bb20ed45d..049103f1d16 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -21,6 +21,8 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
+static struct device_type scsi_dev_type;
+
static const struct {
enum scsi_device_state value;
char *name;
@@ -249,18 +251,27 @@ shost_rd_attr(sg_tablesize, "%hu\n");
shost_rd_attr(unchecked_isa_dma, "%d\n");
shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
-static struct device_attribute *scsi_sysfs_shost_attrs[] = {
- &dev_attr_unique_id,
- &dev_attr_host_busy,
- &dev_attr_cmd_per_lun,
- &dev_attr_can_queue,
- &dev_attr_sg_tablesize,
- &dev_attr_unchecked_isa_dma,
- &dev_attr_proc_name,
- &dev_attr_scan,
- &dev_attr_hstate,
- &dev_attr_supported_mode,
- &dev_attr_active_mode,
+static struct attribute *scsi_sysfs_shost_attrs[] = {
+ &dev_attr_unique_id.attr,
+ &dev_attr_host_busy.attr,
+ &dev_attr_cmd_per_lun.attr,
+ &dev_attr_can_queue.attr,
+ &dev_attr_sg_tablesize.attr,
+ &dev_attr_unchecked_isa_dma.attr,
+ &dev_attr_proc_name.attr,
+ &dev_attr_scan.attr,
+ &dev_attr_hstate.attr,
+ &dev_attr_supported_mode.attr,
+ &dev_attr_active_mode.attr,
+ NULL
+};
+
+struct attribute_group scsi_shost_attr_group = {
+ .attrs = scsi_sysfs_shost_attrs,
+};
+
+struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
+ &scsi_shost_attr_group,
NULL
};
@@ -335,7 +346,12 @@ static struct class sdev_class = {
/* all probing is done in the individual ->probe routines */
static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
{
- struct scsi_device *sdp = to_scsi_device(dev);
+ struct scsi_device *sdp;
+
+ if (dev->type != &scsi_dev_type)
+ return 0;
+
+ sdp = to_scsi_device(dev);
if (sdp->no_uld_attach)
return 0;
return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
@@ -351,10 +367,16 @@ static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static int scsi_bus_suspend(struct device * dev, pm_message_t state)
{
- struct device_driver *drv = dev->driver;
- struct scsi_device *sdev = to_scsi_device(dev);
+ struct device_driver *drv;
+ struct scsi_device *sdev;
int err;
+ if (dev->type != &scsi_dev_type)
+ return 0;
+
+ drv = dev->driver;
+ sdev = to_scsi_device(dev);
+
err = scsi_device_quiesce(sdev);
if (err)
return err;
@@ -370,10 +392,16 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
static int scsi_bus_resume(struct device * dev)
{
- struct device_driver *drv = dev->driver;
- struct scsi_device *sdev = to_scsi_device(dev);
+ struct device_driver *drv;
+ struct scsi_device *sdev;
int err = 0;
+ if (dev->type != &scsi_dev_type)
+ return 0;
+
+ drv = dev->driver;
+ sdev = to_scsi_device(dev);
+
if (drv && drv->resume)
err = drv->resume(dev);
@@ -781,6 +809,27 @@ sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
return count;
}
+static int scsi_target_add(struct scsi_target *starget)
+{
+ int error;
+
+ if (starget->state != STARGET_CREATED)
+ return 0;
+
+ error = device_add(&starget->dev);
+ if (error) {
+ dev_err(&starget->dev, "target device_add failed, error %d\n", error);
+ get_device(&starget->dev);
+ scsi_target_reap(starget);
+ put_device(&starget->dev);
+ return error;
+ }
+ transport_add_device(&starget->dev);
+ starget->state = STARGET_RUNNING;
+
+ return 0;
+}
+
static struct device_attribute sdev_attr_queue_type_rw =
__ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
sdev_store_queue_type_rw);
@@ -796,10 +845,16 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
int error, i;
struct request_queue *rq = sdev->request_queue;
+ struct scsi_target *starget = sdev->sdev_target;
if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
return error;
+ error = scsi_target_add(starget);
+ if (error)
+ return error;
+
+ transport_configure_device(&starget->dev);
error = device_add(&sdev->sdev_gendev);
if (error) {
put_device(sdev->sdev_gendev.parent);
@@ -834,7 +889,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
goto out;
}
- error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL);
+ error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
if (error)
sdev_printk(KERN_INFO, sdev,
@@ -971,44 +1026,6 @@ int scsi_register_interface(struct class_interface *intf)
}
EXPORT_SYMBOL(scsi_register_interface);
-
-static struct device_attribute *class_attr_overridden(
- struct device_attribute **attrs,
- struct device_attribute *attr)
-{
- int i;
-
- if (!attrs)
- return NULL;
- for (i = 0; attrs[i]; i++)
- if (!strcmp(attrs[i]->attr.name, attr->attr.name))
- return attrs[i];
- return NULL;
-}
-
-static int class_attr_add(struct device *classdev,
- struct device_attribute *attr)
-{
- struct device_attribute *base_attr;
-
- /*
- * Spare the caller from having to copy things it's not interested in.
- */
- base_attr = class_attr_overridden(scsi_sysfs_shost_attrs, attr);
- if (base_attr) {
- /* extend permissions */
- attr->attr.mode |= base_attr->attr.mode;
-
- /* override null show/store with default */
- if (!attr->show)
- attr->show = base_attr->show;
- if (!attr->store)
- attr->store = base_attr->store;
- }
-
- return device_create_file(classdev, attr);
-}
-
/**
* scsi_sysfs_add_host - add scsi host to subsystem
* @shost: scsi host struct to add to subsystem
@@ -1018,20 +1035,11 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
{
int error, i;
+ /* add host specific attributes */
if (shost->hostt->shost_attrs) {
for (i = 0; shost->hostt->shost_attrs[i]; i++) {
- error = class_attr_add(&shost->shost_dev,
- shost->hostt->shost_attrs[i]);
- if (error)
- return error;
- }
- }
-
- for (i = 0; scsi_sysfs_shost_attrs[i]; i++) {
- if (!class_attr_overridden(shost->hostt->shost_attrs,
- scsi_sysfs_shost_attrs[i])) {
error = device_create_file(&shost->shost_dev,
- scsi_sysfs_shost_attrs[i]);
+ shost->hostt->shost_attrs[i]);
if (error)
return error;
}
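
Converting the shost attribute array to a static attribute_group lets the driver core create and remove all of the files atomically with the device (via the groups pointer exported through scsi_priv.h above), replacing the per-file device_create_file() loop and the override logic deleted here. Defining such a group looks like this; the attribute is hypothetical and the sketch assumes kernel context:

    #include <linux/device.h>
    #include <linux/sysfs.h>
    #include <linux/stat.h>

    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            return snprintf(buf, PAGE_SIZE, "%d\n", 42);
    }
    static DEVICE_ATTR(demo, S_IRUGO, demo_show, NULL);

    static struct attribute *demo_attrs[] = {
            &dev_attr_demo.attr,
            NULL
    };

    static struct attribute_group demo_attr_group = {
            .attrs = demo_attrs,
    };
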
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6b092a6c295..5fd64e70029 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1961,12 +1961,17 @@ fc_timed_out(struct scsi_cmnd *scmd)
}
/*
- * Must be called with shost->host_lock held
+ * Called by fc_user_scan to locate an rport on the shost that
+ * matches the channel and target id, and invoke scsi_scan_target()
+ * on the rport.
*/
-static int fc_user_scan(struct Scsi_Host *shost, uint channel,
- uint id, uint lun)
+static void
+fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
{
struct fc_rport *rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(rport, &fc_host_rports(shost), peers) {
if (rport->scsi_target_id == -1)
@@ -1975,13 +1980,54 @@ static int fc_user_scan(struct Scsi_Host *shost, uint channel,
if (rport->port_state != FC_PORTSTATE_ONLINE)
continue;
- if ((channel == SCAN_WILD_CARD || channel == rport->channel) &&
- (id == SCAN_WILD_CARD || id == rport->scsi_target_id)) {
- scsi_scan_target(&rport->dev, rport->channel,
- rport->scsi_target_id, lun, 1);
+ if ((channel == rport->channel) &&
+ (id == rport->scsi_target_id)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ scsi_scan_target(&rport->dev, channel, id, lun, 1);
+ return;
}
}
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Called via sysfs scan routines. Necessary, as the FC transport
+ * wants to place all target objects below the rport object. So this
+ * routine must invoke the scsi_scan_target() routine with the rport
+ * object as the parent.
+ */
+static int
+fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun)
+{
+ uint chlo, chhi;
+ uint tgtlo, tgthi;
+
+ if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
+ ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
+ ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
+ return -EINVAL;
+
+ if (channel == SCAN_WILD_CARD) {
+ chlo = 0;
+ chhi = shost->max_channel + 1;
+ } else {
+ chlo = channel;
+ chhi = channel + 1;
+ }
+
+ if (id == SCAN_WILD_CARD) {
+ tgtlo = 0;
+ tgthi = shost->max_id;
+ } else {
+ tgtlo = id;
+ tgthi = id + 1;
+ }
+
+ for ( ; chlo < chhi; chlo++)
+ for ( ; tgtlo < tgthi; tgtlo++)
+ fc_user_scan_tgt(shost, chlo, tgtlo, lun);
+
return 0;
}
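
fc_user_scan() now expands SCAN_WILD_CARD into explicit [lo, hi) ranges and delegates each (channel, target) pair to fc_user_scan_tgt(), which takes the host lock itself and drops it around scsi_scan_target(). One caveat worth noting: because tgtlo is advanced in place by the inner loop and never reset, a double-wildcard scan as written only covers targets on the first channel; a correct expansion needs a fresh target index per channel, along the lines of:

    uint ch, tgt;

    for (ch = chlo; ch < chhi; ch++)
            for (tgt = tgtlo; tgt < tgthi; tgt++)
                    fc_user_scan_tgt(shost, ch, tgt, lun);
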
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 27ec625ab77..7899e3dda9b 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -192,6 +192,16 @@ static void sas_non_host_smp_request(struct request_queue *q)
sas_smp_request(q, rphy_to_shost(rphy), rphy);
}
+static void sas_host_release(struct device *dev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct request_queue *q = sas_host->q;
+
+ if (q)
+ blk_cleanup_queue(q);
+}
+
static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
{
struct request_queue *q;
@@ -199,6 +209,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
struct device *dev;
char namebuf[BUS_ID_SIZE];
const char *name;
+ void (*release)(struct device *);
if (!to_sas_internal(shost->transportt)->f->smp_handler) {
printk("%s can't handle SMP requests\n", shost->hostt->name);
@@ -209,17 +220,19 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
q = blk_init_queue(sas_non_host_smp_request, NULL);
dev = &rphy->dev;
name = dev->bus_id;
+ release = NULL;
} else {
q = blk_init_queue(sas_host_smp_request, NULL);
dev = &shost->shost_gendev;
snprintf(namebuf, sizeof(namebuf),
"sas_host%d", shost->host_no);
name = namebuf;
+ release = sas_host_release;
}
if (!q)
return -ENOMEM;
- error = bsg_register_queue(q, dev, name);
+ error = bsg_register_queue(q, dev, name, release);
if (error) {
blk_cleanup_queue(q);
return -ENOMEM;
@@ -253,7 +266,6 @@ static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
return;
bsg_unregister_queue(q);
- blk_cleanup_queue(q);
}
/*
@@ -1301,6 +1313,9 @@ static void sas_expander_release(struct device *dev)
struct sas_rphy *rphy = dev_to_rphy(dev);
struct sas_expander_device *edev = rphy_to_expander_device(rphy);
+ if (rphy->q)
+ blk_cleanup_queue(rphy->q);
+
put_device(dev->parent);
kfree(edev);
}
@@ -1310,6 +1325,9 @@ static void sas_end_device_release(struct device *dev)
struct sas_rphy *rphy = dev_to_rphy(dev);
struct sas_end_device *edev = rphy_to_end_device(rphy);
+ if (rphy->q)
+ blk_cleanup_queue(rphy->q);
+
put_device(dev->parent);
kfree(edev);
}
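
The SAS transport hunks move blk_cleanup_queue() out of sas_bsg_remove() and into the release callbacks of the objects that own the queue (expander, end device, or the host via the new sas_host_release()), so the queue cannot be freed while bsg still holds a reference; bsg_unregister_queue() alone is now enough at teardown time. The generic shape of a release callback that owns a queue, with a hypothetical private struct:

    static void my_dev_release(struct device *dev)
    {
            struct my_priv *p = container_of(dev, struct my_priv, dev);

            if (p->q)
                    blk_cleanup_queue(p->q);  /* last reference is gone */
            kfree(p);
    }
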
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index bc12b5d5d67..75a64a6cae8 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -24,6 +24,7 @@
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
+#include <linux/sysfs.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_device.h>
@@ -1374,11 +1375,11 @@ static int spi_host_configure(struct transport_container *tc,
* overloads the return by setting 1<<1 if the attribute should
* be writeable */
#define TARGET_ATTRIBUTE_HELPER(name) \
- (si->f->show_##name ? 1 : 0) + \
- (si->f->set_##name ? 2 : 0)
+ (si->f->show_##name ? S_IRUGO : 0) | \
+ (si->f->set_##name ? S_IWUSR : 0)
-static int target_attribute_is_visible(struct kobject *kobj,
- struct attribute *attr, int i)
+static mode_t target_attribute_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
struct scsi_target *starget = transport_class_to_starget(cdev);
@@ -1428,7 +1429,7 @@ static int target_attribute_is_visible(struct kobject *kobj,
spi_support_ius(starget))
return TARGET_ATTRIBUTE_HELPER(hold_mcs);
else if (attr == &dev_attr_revalidate.attr)
- return 1;
+ return S_IWUSR;
return 0;
}
@@ -1462,25 +1463,9 @@ static int spi_target_configure(struct transport_container *tc,
struct device *cdev)
{
struct kobject *kobj = &cdev->kobj;
- int i;
- struct attribute *attr;
- int rc;
-
- for (i = 0; (attr = target_attributes[i]) != NULL; i++) {
- int j = target_attribute_group.is_visible(kobj, attr, i);
-
- /* FIXME: as well as returning -EEXIST, which we'd like
- * to ignore, sysfs also does a WARN_ON and dumps a trace,
- * which is bad, so temporarily, skip attributes that are
- * already visible (the revalidate one) */
- if (j && attr != &dev_attr_revalidate.attr)
- rc = sysfs_add_file_to_group(kobj, attr,
- target_attribute_group.name);
- /* and make the attribute writeable if we have a set
- * function */
- if ((j & 1))
- rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
- }
+
+ /* force an update based on parameters read from the device */
+ sysfs_update_group(kobj, &target_attribute_group);
return 0;
}
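
With is_visible() returning a mode_t instead of a boolean, one callback answers both questions sysfs needs (is the file present, and with what permissions), and sysfs_update_group() can re-evaluate it after the device has been probed; that is what lets spi_target_configure() shrink to a single call. A minimal callback under the new contract, where device_supports() is a hypothetical predicate:

    static mode_t my_attr_is_visible(struct kobject *kobj,
                                     struct attribute *attr, int i)
    {
            if (!device_supports(kobj, attr))
                    return 0;                   /* hide the file */
            return attr->mode | S_IWUSR;        /* show it, writeable */
    }
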
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 03e35967050..31fe6051c79 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -313,7 +313,8 @@ static struct platform_driver sgiwd93_driver = {
.probe = sgiwd93_probe,
.remove = __devexit_p(sgiwd93_remove),
.driver = {
- .name = "sgiwd93"
+ .name = "sgiwd93",
+ .owner = THIS_MODULE,
}
};
@@ -333,3 +334,4 @@ module_exit(sgiwd93_module_exit);
MODULE_DESCRIPTION("SGI WD33C93 driver");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sgiwd93");
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 0a6b45b1b00..2bbef4c45a0 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -53,6 +53,7 @@
MODULE_AUTHOR("Thomas Bogendörfer");
MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:snirm_53c710");
#define SNIRM710_CLOCK 32
@@ -136,6 +137,7 @@ static struct platform_driver snirm710_driver = {
.remove = __devexit_p(snirm710_driver_remove),
.driver = {
.name = "snirm_53c710",
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a860c3a9ae9..e8db66ad0bd 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4322,7 +4322,7 @@ static void do_remove_sysfs_files(void)
static ssize_t
st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+ struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
@@ -4334,7 +4334,7 @@ DEVICE_ATTR(defined, S_IRUGO, st_defined_show, NULL);
static ssize_t
st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+ struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
@@ -4346,7 +4346,7 @@ DEVICE_ATTR(default_blksize, S_IRUGO, st_defblk_show, NULL);
static ssize_t
st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+ struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
char *fmt;
@@ -4361,7 +4361,7 @@ static ssize_t
st_defcompression_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+ struct st_modedef *STm = dev_get_drvdata(dev);
ssize_t l = 0;
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
@@ -4373,7 +4373,7 @@ DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
static ssize_t
st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+ struct st_modedef *STm = dev_get_drvdata(dev);
struct scsi_tape *STp;
int i, j, options;
ssize_t l = 0;
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 06152c7fa68..7514b3a0390 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -294,6 +294,7 @@ static struct platform_driver esp_sun3x_driver = {
.remove = __devexit_p(esp_sun3x_remove),
.driver = {
.name = "sun3x_esp",
+ .owner = THIS_MODULE,
},
};
@@ -314,3 +315,4 @@ MODULE_VERSION(DRV_VERSION);
module_init(sun3x_esp_init);
module_exit(sun3x_esp_exit);
+MODULE_ALIAS("platform:sun3x_esp");
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 58d7eee4fe8..640333b1e75 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1715,13 +1715,12 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned in
}
-static irqreturn_t ihdlr(int irq, unsigned int j) {
+static irqreturn_t ihdlr(unsigned int j)
+{
struct scsi_cmnd *SCpnt;
unsigned int i, k, c, status, tstatus, reg, ret;
struct mscp *spp, *cpp;
-
- if (sh[j]->irq != irq)
- panic("%s: ihdlr, irq %d, sh[j]->irq %d.\n", BN(j), irq, sh[j]->irq);
+ int irq = sh[j]->irq;
/* Check if this board need to be serviced */
if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none;
@@ -1935,7 +1934,7 @@ static irqreturn_t do_interrupt_handler(int irq, void *shap) {
if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE;
spin_lock_irqsave(sh[j]->host_lock, spin_flags);
- ret = ihdlr(irq, j);
+ ret = ihdlr(j);
spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
return ret;
}
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 678c02f1ae2..a452ac67fc9 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -224,12 +224,11 @@ static int v9fs_show_options(struct seq_file *m, struct vfsmount *mnt)
}
static void
-v9fs_umount_begin(struct vfsmount *vfsmnt, int flags)
+v9fs_umount_begin(struct super_block *sb)
{
- struct v9fs_session_info *v9ses = vfsmnt->mnt_sb->s_fs_info;
+ struct v9fs_session_info *v9ses = sb->s_fs_info;
- if (flags & MNT_FORCE)
- v9fs_session_cancel(v9ses);
+ v9fs_session_cancel(v9ses);
}
static const struct super_operations v9fs_super_ops = {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 5e1a4fb5cac..9924581df6f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -543,7 +543,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
unsigned long interp_load_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc = 0;
- struct files_struct *files;
int executable_stack = EXSTACK_DEFAULT;
unsigned long def_flags = 0;
struct {
@@ -593,20 +592,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto out_free_ph;
}
- files = current->files; /* Refcounted so ok */
- retval = unshare_files();
- if (retval < 0)
- goto out_free_ph;
- if (files == current->files) {
- put_files_struct(files);
- files = NULL;
- }
-
- /* exec will make our files private anyway, but for the a.out
- loader stuff we need to do it earlier */
retval = get_unused_fd();
if (retval < 0)
- goto out_free_fh;
+ goto out_free_ph;
get_file(bprm->file);
fd_install(elf_exec_fileno = retval, bprm->file);
@@ -728,12 +716,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
if (retval)
goto out_free_dentry;
- /* Discard our unneeded old files struct */
- if (files) {
- put_files_struct(files);
- files = NULL;
- }
-
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
@@ -1016,9 +998,6 @@ out_free_interp:
kfree(elf_interpreter);
out_free_file:
sys_close(elf_exec_fileno);
-out_free_fh:
- if (files)
- reset_files_struct(current, files);
out_free_ph:
kfree(elf_phdata);
goto out;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index b53c7e5f41b..dbf0ac0523d 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -110,7 +110,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
char *iname_addr = iname;
int retval;
int fd_binary = -1;
- struct files_struct *files = NULL;
retval = -ENOEXEC;
if (!enabled)
@@ -133,21 +132,13 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
if (fmt->flags & MISC_FMT_OPEN_BINARY) {
- files = current->files;
- retval = unshare_files();
- if (retval < 0)
- goto _ret;
- if (files == current->files) {
- put_files_struct(files);
- files = NULL;
- }
/* if the binary should be opened on behalf of the
* interpreter then keep it open and assign descriptor
* to it */
fd_binary = get_unused_fd();
if (fd_binary < 0) {
retval = fd_binary;
- goto _unshare;
+ goto _ret;
}
fd_install(fd_binary, bprm->file);
@@ -205,10 +196,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
if (retval < 0)
goto _error;
- if (files) {
- put_files_struct(files);
- files = NULL;
- }
_ret:
return retval;
_error:
@@ -216,9 +203,6 @@ _error:
sys_close(fd_binary);
bprm->interp_flags = 0;
bprm->interp_data = 0;
-_unshare:
- if (files)
- reset_files_struct(current, files);
goto _ret;
}
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
index 14c63527c76..fdc36bfd6a7 100644
--- a/fs/binfmt_som.c
+++ b/fs/binfmt_som.c
@@ -194,7 +194,6 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
unsigned long som_entry;
struct som_hdr *som_ex;
struct som_exec_auxhdr *hpuxhdr;
- struct files_struct *files;
/* Get the exec-header */
som_ex = (struct som_hdr *) bprm->buf;
@@ -221,15 +220,6 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
goto out_free;
}
- files = current->files; /* Refcounted so ok */
- retval = unshare_files();
- if (retval < 0)
- goto out_free;
- if (files == current->files) {
- put_files_struct(files);
- files = NULL;
- }
-
retval = get_unused_fd();
if (retval < 0)
goto out_free;
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index f53f41ff166..95024c066d8 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -25,14 +25,26 @@
static LIST_HEAD(cifs_dfs_automount_list);
-/*
- * DFS functions
-*/
+static void cifs_dfs_expire_automounts(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cifs_dfs_automount_task,
+ cifs_dfs_expire_automounts);
+static int cifs_dfs_mountpoint_expiry_timeout = 500 * HZ;
+
+static void cifs_dfs_expire_automounts(struct work_struct *work)
+{
+ struct list_head *list = &cifs_dfs_automount_list;
+
+ mark_mounts_for_expiry(list);
+ if (!list_empty(list))
+ schedule_delayed_work(&cifs_dfs_automount_task,
+ cifs_dfs_mountpoint_expiry_timeout);
+}
-void dfs_shrink_umount_helper(struct vfsmount *vfsmnt)
+void cifs_dfs_release_automount_timer(void)
{
- mark_mounts_for_expiry(&cifs_dfs_automount_list);
- mark_mounts_for_expiry(&cifs_dfs_automount_list);
+ BUG_ON(!list_empty(&cifs_dfs_automount_list));
+ cancel_delayed_work(&cifs_dfs_automount_task);
+ flush_scheduled_work();
}
/**
@@ -261,10 +273,11 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
err = do_add_mount(newmnt, nd, nd->path.mnt->mnt_flags, mntlist);
switch (err) {
case 0:
- dput(nd->path.dentry);
- mntput(nd->path.mnt);
+ path_put(&nd->path);
nd->path.mnt = newmnt;
nd->path.dentry = dget(newmnt->mnt_root);
+ schedule_delayed_work(&cifs_dfs_automount_task,
+ cifs_dfs_mountpoint_expiry_timeout);
break;
case -EBUSY:
/* someone else made a mount here whilst we were busy */
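
The DFS referral code replaces the one-shot dfs_shrink_umount_helper() with a self-rearming delayed work item: each pass calls mark_mounts_for_expiry(), and as long as automounts remain on the list the handler reschedules itself; cifs_dfs_release_automount_timer() cancels and flushes it at module exit. The rearming pattern in isolation, with hypothetical names:

    #include <linux/workqueue.h>

    static void my_expire(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_task, my_expire);

    static void my_expire(struct work_struct *work)
    {
            my_expire_pass();                     /* hypothetical sweep */
            if (!my_list_empty())                 /* hypothetical test */
                    schedule_delayed_work(&my_task, 500 * HZ);
    }
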
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index a04b17e5a9d..39c2cbdface 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -466,16 +466,11 @@ static struct quotactl_ops cifs_quotactl_ops = {
};
#endif
-static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
+static void cifs_umount_begin(struct super_block *sb)
{
- struct cifs_sb_info *cifs_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsTconInfo *tcon;
- dfs_shrink_umount_helper(vfsmnt);
-
- if (!(flags & MNT_FORCE))
- return;
- cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
if (cifs_sb == NULL)
return;
@@ -1100,6 +1095,7 @@ exit_cifs(void)
cFYI(DBG2, ("exit_cifs"));
cifs_proc_clean();
#ifdef CONFIG_CIFS_DFS_UPCALL
+ cifs_dfs_release_automount_timer();
unregister_key_type(&key_type_dns_resolver);
#endif
#ifdef CONFIG_CIFS_UPCALL
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 0c83da4a7da..50f9fdae19b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -104,13 +104,7 @@ extern int mode_to_acl(struct inode *inode, const char *path, __u64);
extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
const char *);
extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
-#ifdef CONFIG_CIFS_DFS_UPCALL
-extern void dfs_shrink_umount_helper(struct vfsmount *vfsmnt);
-#else
-static inline void dfs_shrink_umount_helper(struct vfsmount *vfsmnt)
-{
-}
-#endif /* DFS_UPCALL */
+extern void cifs_dfs_release_automount_timer(void);
void cifs_proc_init(void);
void cifs_proc_clean(void);
diff --git a/fs/exec.c b/fs/exec.c
index 54a0a557b67..b152029f18f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -953,7 +953,6 @@ int flush_old_exec(struct linux_binprm * bprm)
{
char * name;
int i, ch, retval;
- struct files_struct *files;
char tcomm[sizeof(current->comm)];
/*
@@ -965,26 +964,15 @@ int flush_old_exec(struct linux_binprm * bprm)
goto out;
/*
- * Make sure we have private file handles. Ask the
- * fork helper to do the work for us and the exit
- * helper to do the cleanup of the old one.
- */
- files = current->files; /* refcounted so safe to hold */
- retval = unshare_files();
- if (retval)
- goto out;
- /*
* Release all of the old mmap stuff
*/
retval = exec_mmap(bprm->mm);
if (retval)
- goto mmap_failed;
+ goto out;
bprm->mm = NULL; /* We're using it now */
/* This is the point of no return */
- put_files_struct(files);
-
current->sas_ss_sp = current->sas_ss_size = 0;
if (current->euid == current->uid && current->egid == current->gid)
@@ -1034,8 +1022,6 @@ int flush_old_exec(struct linux_binprm * bprm)
return 0;
-mmap_failed:
- reset_files_struct(current, files);
out:
return retval;
}
@@ -1283,12 +1269,17 @@ int do_execve(char * filename,
struct linux_binprm *bprm;
struct file *file;
unsigned long env_p;
+ struct files_struct *displaced;
int retval;
+ retval = unshare_files(&displaced);
+ if (retval)
+ goto out_ret;
+
retval = -ENOMEM;
bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
if (!bprm)
- goto out_ret;
+ goto out_files;
file = open_exec(filename);
retval = PTR_ERR(file);
@@ -1343,6 +1334,8 @@ int do_execve(char * filename,
security_bprm_free(bprm);
acct_update_integrals(current);
kfree(bprm);
+ if (displaced)
+ put_files_struct(displaced);
return retval;
}
@@ -1363,6 +1356,9 @@ out_file:
out_kfree:
kfree(bprm);
+out_files:
+ if (displaced)
+ reset_files_struct(displaced);
out_ret:
return retval;
}
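
Hoisting unshare_files() from the individual binfmt loaders into do_execve() gives the files-struct juggling a single owner: the caller detaches any shared table once, before loading; on success the displaced table is simply dropped, and on failure it is reinstated. The binfmt_elf, binfmt_misc and binfmt_som deletions above are the payoff. The ownership flow, schematically (load_the_binary() is a hypothetical stand-in for the exec work):

    static int my_execve(void)
    {
            struct files_struct *displaced;
            int retval;

            retval = unshare_files(&displaced);  /* detach shared table */
            if (retval)
                    return retval;

            retval = load_the_binary();

            if (retval == 0) {
                    if (displaced)
                            put_files_struct(displaced);  /* old table unneeded */
            } else if (displaced) {
                    reset_files_struct(displaced);        /* roll back */
            }
            return retval;
    }
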
diff --git a/fs/fcntl.c b/fs/fcntl.c
index e632da761fc..3f3ac630ccd 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -55,14 +55,16 @@ static int get_close_on_exec(unsigned int fd)
* file_lock held for write.
*/
-static int locate_fd(struct files_struct *files,
- struct file *file, unsigned int orig_start)
+static int locate_fd(unsigned int orig_start, int cloexec)
{
+ struct files_struct *files = current->files;
unsigned int newfd;
unsigned int start;
int error;
struct fdtable *fdt;
+ spin_lock(&files->file_lock);
+
error = -EINVAL;
if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
goto out;
@@ -97,42 +99,28 @@ repeat:
if (error)
goto repeat;
- /*
- * We reacquired files_lock, so we are safe as long as
- * we reacquire the fdtable pointer and use it while holding
- * the lock, no one can free it during that time.
- */
if (start <= files->next_fd)
files->next_fd = newfd + 1;
+ FD_SET(newfd, fdt->open_fds);
+ if (cloexec)
+ FD_SET(newfd, fdt->close_on_exec);
+ else
+ FD_CLR(newfd, fdt->close_on_exec);
error = newfd;
-
+
out:
+ spin_unlock(&files->file_lock);
return error;
}
static int dupfd(struct file *file, unsigned int start, int cloexec)
{
- struct files_struct * files = current->files;
- struct fdtable *fdt;
- int fd;
-
- spin_lock(&files->file_lock);
- fd = locate_fd(files, file, start);
- if (fd >= 0) {
- /* locate_fd() may have expanded fdtable, load the ptr */
- fdt = files_fdtable(files);
- FD_SET(fd, fdt->open_fds);
- if (cloexec)
- FD_SET(fd, fdt->close_on_exec);
- else
- FD_CLR(fd, fdt->close_on_exec);
- spin_unlock(&files->file_lock);
+ int fd = locate_fd(start, cloexec);
+ if (fd >= 0)
fd_install(fd, file);
- } else {
- spin_unlock(&files->file_lock);
+ else
fput(file);
- }
return fd;
}
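
With the descriptor-table bookkeeping folded into locate_fd(), dupfd() becomes allocate-then-install, and the close-on-exec bit is set under the same file_lock as the allocation. From userspace this is the path behind F_DUPFD_CLOEXEC, which avoids the race between F_DUPFD and a separate F_SETFD; a small runnable example:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/null", O_RDONLY);
            /* lowest free descriptor >= 10, CLOEXEC set atomically */
            int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 10);

            if (dup_fd < 0)
                    perror("fcntl");
            else
                    printf("duplicated %d -> %d with CLOEXEC\n", fd, dup_fd);
            return 0;
    }
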
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 033f7bdd47e..4df34da2284 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -242,10 +242,9 @@ struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
return inode;
}
-static void fuse_umount_begin(struct vfsmount *vfsmnt, int flags)
+static void fuse_umount_begin(struct super_block *sb)
{
- if (flags & MNT_FORCE)
- fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb));
+ fuse_abort_conn(get_fuse_conn_super(sb));
}
static void fuse_send_destroy(struct fuse_conn *fc)
diff --git a/fs/locks.c b/fs/locks.c
index e1ea2fe0368..44d9a6a7ec5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -236,6 +236,7 @@ void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
new->fl_ops = NULL;
new->fl_lmops = NULL;
}
+EXPORT_SYMBOL(__locks_copy_lock);
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
diff --git a/fs/namespace.c b/fs/namespace.c
index 0505fb61aa7..f48f98110c3 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1061,10 +1061,11 @@ static int do_umount(struct vfsmount *mnt, int flags)
* about for the moment.
*/
- lock_kernel();
- if (sb->s_op->umount_begin)
- sb->s_op->umount_begin(mnt, flags);
- unlock_kernel();
+ if (flags & MNT_FORCE && sb->s_op->umount_begin) {
+ lock_kernel();
+ sb->s_op->umount_begin(sb);
+ unlock_kernel();
+ }
/*
* No sense to grab the lock for this test, but test itself looks
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 20a1cb1810f..fa220dc7460 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -198,7 +198,7 @@ static match_table_t nfs_secflavor_tokens = {
};
-static void nfs_umount_begin(struct vfsmount *, int);
+static void nfs_umount_begin(struct super_block *);
static int nfs_statfs(struct dentry *, struct kstatfs *);
static int nfs_show_options(struct seq_file *, struct vfsmount *);
static int nfs_show_stats(struct seq_file *, struct vfsmount *);
@@ -647,13 +647,11 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
* Begin unmount by attempting to remove all automounted mountpoints we added
* in response to xdev traversals and referrals
*/
-static void nfs_umount_begin(struct vfsmount *vfsmnt, int flags)
+static void nfs_umount_begin(struct super_block *sb)
{
- struct nfs_server *server = NFS_SB(vfsmnt->mnt_sb);
+ struct nfs_server *server = NFS_SB(sb);
struct rpc_clnt *rpc;
- if (!(flags & MNT_FORCE))
- return;
/* -EIO all pending I/O */
rpc = server->client_acl;
if (!IS_ERR(rpc))
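The fuse, namespace, and nfs hunks are one interface change: ->umount_begin() loses its (vfsmount, flags) arguments, and do_umount() performs the MNT_FORCE test once before calling it. An implementation after the change reduces to something like this hedged sketch (the myfs_* names are hypothetical):

/* New-style hook: the VFS only calls this for MNT_FORCE unmounts now,
 * so no flags test is needed here. */
static void myfs_umount_begin(struct super_block *sb)
{
	struct myfs_conn *conn = sb->s_fs_info;	/* hypothetical private data */

	myfs_abort_pending_io(conn);		/* hypothetical helper */
}

static const struct super_operations myfs_super_ops = {
	.umount_begin	= myfs_umount_begin,
	/* ... */
};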
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index ade9a7e6a75..dbdfabbfd60 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -477,11 +477,10 @@ const struct file_operations sysfs_file_operations = {
.poll = sysfs_poll,
};
-
-int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
- int type)
+int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
+ const struct attribute *attr, int type, mode_t amode)
{
- umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
+ umode_t mode = (amode & S_IALLUGO) | S_IFREG;
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
int rc;
@@ -502,6 +501,13 @@ int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
}
+int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
+ int type)
+{
+ return sysfs_add_file_mode(dir_sd, attr, type, attr->mode);
+}
+
+
/**
* sysfs_create_file - create an attribute file for an object.
* @kobj: object we're creating for.
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 47790491503..eeba38417b1 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -23,35 +23,50 @@ static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
int i;
for (i = 0, attr = grp->attrs; *attr; i++, attr++)
- if (!grp->is_visible ||
- grp->is_visible(kobj, *attr, i))
- sysfs_hash_and_remove(dir_sd, (*attr)->name);
+ sysfs_hash_and_remove(dir_sd, (*attr)->name);
}
static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
- const struct attribute_group *grp)
+ const struct attribute_group *grp, int update)
{
struct attribute *const* attr;
int error = 0, i;
- for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++)
- if (!grp->is_visible ||
- grp->is_visible(kobj, *attr, i))
- error |=
- sysfs_add_file(dir_sd, *attr, SYSFS_KOBJ_ATTR);
+ for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) {
+ mode_t mode = 0;
+
+ /* In update mode, we're changing the permissions or
+ * visibility. Do this by first removing and then
+ * re-adding (if required) the file. */
+ if (update)
+ sysfs_hash_and_remove(dir_sd, (*attr)->name);
+ if (grp->is_visible) {
+ mode = grp->is_visible(kobj, *attr, i);
+ if (!mode)
+ continue;
+ }
+ error = sysfs_add_file_mode(dir_sd, *attr, SYSFS_KOBJ_ATTR,
+ (*attr)->mode | mode);
+ if (unlikely(error))
+ break;
+ }
if (error)
remove_files(dir_sd, kobj, grp);
return error;
}
-int sysfs_create_group(struct kobject * kobj,
- const struct attribute_group * grp)
+static int internal_create_group(struct kobject *kobj, int update,
+ const struct attribute_group *grp)
{
struct sysfs_dirent *sd;
int error;
- BUG_ON(!kobj || !kobj->sd);
+ BUG_ON(!kobj || (!update && !kobj->sd));
+
+ /* Updates may happen before the object has been instantiated */
+ if (unlikely(update && !kobj->sd))
+ return -EINVAL;
if (grp->name) {
error = sysfs_create_subdir(kobj, grp->name, &sd);
@@ -60,7 +75,7 @@ int sysfs_create_group(struct kobject * kobj,
} else
sd = kobj->sd;
sysfs_get(sd);
- error = create_files(sd, kobj, grp);
+ error = create_files(sd, kobj, grp, update);
if (error) {
if (grp->name)
sysfs_remove_subdir(sd);
@@ -69,6 +84,47 @@ int sysfs_create_group(struct kobject * kobj,
return error;
}
+/**
+ * sysfs_create_group - given a directory kobject, create an attribute group
+ * @kobj: The kobject to create the group on
+ * @grp: The attribute group to create
+ *
+ * This function creates a group for the first time. It will explicitly
+ * warn and error if any of the attribute files being created already exist.
+ *
+ * Returns 0 on success or error.
+ */
+int sysfs_create_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return internal_create_group(kobj, 0, grp);
+}
+
+/**
+ * sysfs_update_group - given a directory kobject, update an attribute group
+ * @kobj: The kobject to update the group on
+ * @grp: The attribute group to update
+ *
+ * This function updates an attribute group. Unlike
+ * sysfs_create_group(), it will explicitly not warn or error if any
+ * of the attribute files being created already exist. Furthermore,
+ * if the visibility of the files has changed through the is_visible()
+ * callback, it will update the permissions and add or remove the
+ * relevant files.
+ *
+ * The primary use for this function is to call it after making a change
+ * that affects group visibility.
+ *
+ * Returns 0 on success or error.
+ */
+int sysfs_update_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return internal_create_group(kobj, 1, grp);
+}
+
+
+
void sysfs_remove_group(struct kobject * kobj,
const struct attribute_group * grp)
{
@@ -95,4 +151,5 @@ void sysfs_remove_group(struct kobject * kobj,
EXPORT_SYMBOL_GPL(sysfs_create_group);
+EXPORT_SYMBOL_GPL(sysfs_update_group);
EXPORT_SYMBOL_GPL(sysfs_remove_group);
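Taken together, the group.c changes let a driver re-evaluate is_visible() after a state change. A hedged usage sketch (the demo_* names and debug_enabled flag are illustrative, not from the patch):

static int debug_enabled;			/* illustrative state */

static struct attribute *demo_attrs[] = {
	&dev_attr_mode.attr,			/* hypothetical attributes */
	&dev_attr_debug.attr,
	NULL,
};

/* Return 0 to hide the file; any other mode is ORed into attr->mode. */
static mode_t demo_is_visible(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	if (attr == &dev_attr_debug.attr && !debug_enabled)
		return 0;
	return attr->mode;
}

static struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_is_visible,
};

static void demo_set_debug(struct kobject *kobj, int on)
{
	debug_enabled = on;
	/* Re-add, remove, or re-mode the files to match the new state. */
	if (sysfs_update_group(kobj, &demo_group))
		printk(KERN_WARNING "demo: sysfs_update_group failed\n");
}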
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index ff17f8da9b4..ce4e15f8aae 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -154,6 +154,8 @@ extern const struct file_operations sysfs_file_operations;
int sysfs_add_file(struct sysfs_dirent *dir_sd,
const struct attribute *attr, int type);
+int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
+ const struct attribute *attr, int type, mode_t amode);
/*
* bin.c
*/
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 9e19a704d48..15f3ae25c51 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -388,6 +388,11 @@ static inline int fls64(unsigned long x)
}
#endif
+static inline unsigned long __fls(unsigned long x)
+{
+ return fls64(x) - 1;
+}
+
static inline int fls(int x)
{
return fls64((unsigned int) x);
diff --git a/include/asm-arm/arch-sa1100/ide.h b/include/asm-arm/arch-sa1100/ide.h
index 98b10bcf9f1..b14cbda01dc 100644
--- a/include/asm-arm/arch-sa1100/ide.h
+++ b/include/asm-arm/arch-sa1100/ide.h
@@ -37,12 +37,12 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
memset(hw, 0, sizeof(*hw));
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw->io_ports[i] = reg;
+ for (i = 0; i <= 7; i++) {
+ hw->io_ports_array[i] = reg;
reg += regincr;
}
- hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+ hw->io_ports.ctl_addr = ctrl_port;
if (irq)
*irq = 0;
diff --git a/include/asm-cris/arch-v10/ide.h b/include/asm-cris/arch-v10/ide.h
index ea34e0d0a38..5366e623932 100644
--- a/include/asm-cris/arch-v10/ide.h
+++ b/include/asm-cris/arch-v10/ide.h
@@ -59,22 +59,19 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, u
int i;
/* fill in ports for ATA addresses 0 to 7 */
-
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw->io_ports[i] = data_port |
+ for (i = 0; i <= 7; i++) {
+ hw->io_ports_array[i] = data_port |
IO_FIELD(R_ATA_CTRL_DATA, addr, i) |
IO_STATE(R_ATA_CTRL_DATA, cs0, active);
}
/* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */
-
- hw->io_ports[IDE_CONTROL_OFFSET] = data_port |
+ hw->io_ports.ctl_addr = data_port |
IO_FIELD(R_ATA_CTRL_DATA, addr, 6) |
IO_STATE(R_ATA_CTRL_DATA, cs1, active);
/* whats this for ? */
-
- hw->io_ports[IDE_IRQ_OFFSET] = 0;
+ hw->io_ports.irq_addr = 0;
}
static inline void ide_init_default_hwifs(void)
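Both IDE hunks above are the same conversion: the old hw->io_ports[IDE_*_OFFSET] array gives way to an anonymous union in hw_regs_t, where io_ports_array[] (for index loops) aliases a struct of named ports (for direct access such as ctl_addr). A reduced, hedged sketch of the aliasing pattern -- member names per this kernel's include/linux/ide.h, with the read/write alias unions omitted:

struct ide_io_ports {
	unsigned long data_addr;	/* index 0 */
	unsigned long error_addr;	/* index 1 */
	unsigned long nsect_addr;	/* index 2 */
	unsigned long lbal_addr;	/* index 3 */
	unsigned long lbam_addr;	/* index 4 */
	unsigned long lbah_addr;	/* index 5 */
	unsigned long device_addr;	/* index 6 */
	unsigned long status_addr;	/* index 7 */
	unsigned long ctl_addr;		/* index 8 */
	unsigned long irq_addr;		/* index 9 */
};

typedef struct hw_regs_s {
	union {				/* anonymous union (GNU C) */
		struct ide_io_ports io_ports;
		unsigned long io_ports_array[10];	/* same storage */
	};
	/* ... */
} hw_regs_t;

static void init_ports(hw_regs_t *hw, unsigned long base, unsigned long ctl)
{
	int i;

	for (i = 0; i <= 7; i++)	/* taskfile registers by index */
		hw->io_ports_array[i] = base + i;
	hw->io_ports.ctl_addr = ctl;	/* control port by name */
}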
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
new file mode 100644
index 00000000000..be24465403d
--- /dev/null
+++ b/include/asm-generic/bitops/__fls.h
@@ -0,0 +1,43 @@
+#ifndef _ASM_GENERIC_BITOPS___FLS_H_
+#define _ASM_GENERIC_BITOPS___FLS_H_
+
+#include <asm/types.h>
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ int num = BITS_PER_LONG - 1;
+
+#if BITS_PER_LONG == 64
+ if (!(word & (~0ul << 32))) {
+ num -= 32;
+ word <<= 32;
+ }
+#endif
+ if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
+ num -= 16;
+ word <<= 16;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
+ num -= 8;
+ word <<= 8;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
+ num -= 4;
+ word <<= 4;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
+ num -= 2;
+ word <<= 2;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-1))))
+ num -= 1;
+ return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
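A few hedged spot checks of the generic __fls() above -- with the function pasted into a userspace file and BITS_PER_LONG defined as 64, these assertions should hold:

#include <assert.h>

int main(void)
{
	assert(__fls(1UL) == 0);		/* only bit 0 set */
	assert(__fls(0x90UL) == 7);		/* bits 7 and 4: MSB wins */
	assert(__fls(1UL << 33) == 33);		/* exercises the 64-bit step */
	assert(__fls(~0UL) == 63);		/* all bits set */
	return 0;
}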
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 72a51e5a12e..1914e974251 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -1,11 +1,13 @@
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
#define _ASM_GENERIC_BITOPS_FIND_H_
+#ifndef CONFIG_GENERIC_FIND_NEXT_BIT
extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);
extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
long size, unsigned long offset);
+#endif
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
index 1b6b17ce242..86d403f8b25 100644
--- a/include/asm-generic/bitops/fls64.h
+++ b/include/asm-generic/bitops/fls64.h
@@ -3,6 +3,18 @@
#include <asm/types.h>
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+#if BITS_PER_LONG == 32
static inline int fls64(__u64 x)
{
__u32 h = x >> 32;
@@ -10,5 +22,15 @@ static inline int fls64(__u64 x)
return fls(h) + 32;
return fls(x);
}
+#elif BITS_PER_LONG == 64
+static inline int fls64(__u64 x)
+{
+ if (x == 0)
+ return 0;
+ return __fls(x) + 1;
+}
+#else
+#error BITS_PER_LONG not 32 or 64
+#endif
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
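Note the off-by-one relative to __fls(): fls64() is 1-based and defined for zero. Spot checks, under the same userspace-harness assumption as above:

assert(fls64(0) == 0);			/* no bit set */
assert(fls64(1) == 1);			/* bit 0 -> position 1 */
assert(fls64(0x90) == 8);		/* __fls(0x90) == 7, plus one */
assert(fls64(1ULL << 63) == 64);	/* top bit -> position 64 */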
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 953d3df9dd2..e2ca8003733 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -407,6 +407,22 @@ fls (int t)
return ia64_popcnt(x);
}
+/*
+ * Find the last (most significant) bit set. Undefined for x==0.
+ * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
+ */
+static inline unsigned long
+__fls (unsigned long x)
+{
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x |= x >> 32;
+ return ia64_popcnt(x) - 1;
+}
+
#include <asm-generic/bitops/fls64.h>
/*
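The ia64 __fls just above uses a different idiom from the generic header: smear the highest set bit into every lower position, then popcount -- the count minus one is the MSB's index. A hedged portable rendering, with GCC's __builtin_popcountl standing in for ia64_popcnt:

/* Smear-then-popcount sketch for 64-bit longs; undefined for x == 0. */
static inline unsigned long msb_index(unsigned long x)
{
	x |= x >> 1;		/* after the six ORs, every bit below */
	x |= x >> 2;		/* the MSB is set: e.g. 0x90 -> 0xff  */
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	return __builtin_popcountl(x) - 1;	/* 0xff -> 8 - 1 == 7 */
}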
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index de2ed2cbdd8..2fe292c275f 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -21,6 +21,10 @@
#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);
@@ -517,6 +521,14 @@ do { \
#define ia64_ptrd(addr, size) \
asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+#define ia64_ttag(addr) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+ ia64_intri_res; \
+})
+
+
/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
#define ia64_lfhint_none 0
diff --git a/include/asm-ia64/kvm.h b/include/asm-ia64/kvm.h
index 030d29b4b26..eb2d3559d08 100644
--- a/include/asm-ia64/kvm.h
+++ b/include/asm-ia64/kvm.h
@@ -1,6 +1,205 @@
-#ifndef __LINUX_KVM_IA64_H
-#define __LINUX_KVM_IA64_H
+#ifndef __ASM_IA64_KVM_H
+#define __ASM_IA64_KVM_H
-/* ia64 does not support KVM */
+/*
+ * asm-ia64/kvm.h: kvm structure definitions for ia64
+ *
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <asm/types.h>
+#include <asm/fpu.h>
+
+#include <linux/ioctl.h>
+
+/* Architectural interrupt line count. */
+#define KVM_NR_INTERRUPTS 256
+
+#define KVM_IOAPIC_NUM_PINS 24
+
+struct kvm_ioapic_state {
+ __u64 base_address;
+ __u32 ioregsel;
+ __u32 id;
+ __u32 irr;
+ __u32 pad;
+ union {
+ __u64 bits;
+ struct {
+ __u8 vector;
+ __u8 delivery_mode:3;
+ __u8 dest_mode:1;
+ __u8 delivery_status:1;
+ __u8 polarity:1;
+ __u8 remote_irr:1;
+ __u8 trig_mode:1;
+ __u8 mask:1;
+ __u8 reserve:7;
+ __u8 reserved[4];
+ __u8 dest_id;
+ } fields;
+ } redirtbl[KVM_IOAPIC_NUM_PINS];
+};
+
+#define KVM_IRQCHIP_PIC_MASTER 0
+#define KVM_IRQCHIP_PIC_SLAVE 1
+#define KVM_IRQCHIP_IOAPIC 2
+
+#define KVM_CONTEXT_SIZE 8*1024
+
+union context {
+ /* 8K size */
+ char dummy[KVM_CONTEXT_SIZE];
+ struct {
+ unsigned long psr;
+ unsigned long pr;
+ unsigned long caller_unat;
+ unsigned long pad;
+ unsigned long gr[32];
+ unsigned long ar[128];
+ unsigned long br[8];
+ unsigned long cr[128];
+ unsigned long rr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long pkr[8];
+ struct ia64_fpreg fr[128];
+ };
+};
+
+struct thash_data {
+ union {
+ struct {
+ unsigned long p : 1; /* 0 */
+ unsigned long rv1 : 1; /* 1 */
+ unsigned long ma : 3; /* 2-4 */
+ unsigned long a : 1; /* 5 */
+ unsigned long d : 1; /* 6 */
+ unsigned long pl : 2; /* 7-8 */
+ unsigned long ar : 3; /* 9-11 */
+ unsigned long ppn : 38; /* 12-49 */
+ unsigned long rv2 : 2; /* 50-51 */
+ unsigned long ed : 1; /* 52 */
+ unsigned long ig1 : 11; /* 53-63 */
+ };
+ struct {
+ unsigned long __rv1 : 53; /* 0-52 */
+ unsigned long contiguous : 1; /*53 */
+ unsigned long tc : 1; /* 54 TR or TC */
+ unsigned long cl : 1;
+ /* 55 I side or D side cache line */
+ unsigned long len : 4; /* 56-59 */
+ unsigned long io : 1; /* 60 entry is for io or not */
+ unsigned long nomap : 1;
+ /* 61 entry can't be inserted into the machine TLB. */
+ unsigned long checked : 1;
+ /* 62 for VTLB/VHPT sanity check */
+ unsigned long invalid : 1;
+ /* 63 invalid entry */
+ };
+ unsigned long page_flags;
+ }; /* same for VHPT and TLB */
+
+ union {
+ struct {
+ unsigned long rv3 : 2;
+ unsigned long ps : 6;
+ unsigned long key : 24;
+ unsigned long rv4 : 32;
+ };
+ unsigned long itir;
+ };
+ union {
+ struct {
+ unsigned long ig2 : 12;
+ unsigned long vpn : 49;
+ unsigned long vrn : 3;
+ };
+ unsigned long ifa;
+ unsigned long vadr;
+ struct {
+ unsigned long tag : 63;
+ unsigned long ti : 1;
+ };
+ unsigned long etag;
+ };
+ union {
+ struct thash_data *next;
+ unsigned long rid;
+ unsigned long gpaddr;
+ };
+};
+
+#define NITRS 8
+#define NDTRS 8
+
+struct saved_vpd {
+ unsigned long vhpi;
+ unsigned long vgr[16];
+ unsigned long vbgr[16];
+ unsigned long vnat;
+ unsigned long vbnat;
+ unsigned long vcpuid[5];
+ unsigned long vpsr;
+ unsigned long vpr;
+ unsigned long vcr[128];
+};
+
+struct kvm_regs {
+ char *saved_guest;
+ char *saved_stack;
+ struct saved_vpd vpd;
+ /*Arch-regs*/
+ int mp_state;
+ unsigned long vmm_rr;
+ /* TR and TC. */
+ struct thash_data itrs[NITRS];
+ struct thash_data dtrs[NDTRS];
+ /* Bit is set if there is a tr/tc for the region. */
+ unsigned char itr_regions;
+ unsigned char dtr_regions;
+ unsigned char tc_regions;
+
+ char irq_check;
+ unsigned long saved_itc;
+ unsigned long itc_check;
+ unsigned long timer_check;
+ unsigned long timer_pending;
+ unsigned long last_itc;
+
+ unsigned long vrr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long insvc[4]; /* Interrupt in service. */
+ unsigned long xtp;
+
+ unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_saved_rr0; /* from kvm_arch */
+ unsigned long metaphysical_saved_rr4; /* from kvm_arch */
+ unsigned long fp_psr; /* used for lazy float registers */
+ unsigned long saved_gp;
+ /* for physical emulation */
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
#endif
diff --git a/include/asm-ia64/kvm_host.h b/include/asm-ia64/kvm_host.h
new file mode 100644
index 00000000000..c082c208c1f
--- /dev/null
+++ b/include/asm-ia64/kvm_host.h
@@ -0,0 +1,524 @@
+/*
+ * kvm_host.h: used by the kvm module; holds the ia64-specific sections.
+ *
+ * Copyright (C) 2007, Intel Corporation.
+ *
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#ifndef __ASM_KVM_HOST_H
+#define __ASM_KVM_HOST_H
+
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+
+#define KVM_MAX_VCPUS 4
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+
+/* define exit reasons from vmm to kvm*/
+#define EXIT_REASON_VM_PANIC 0
+#define EXIT_REASON_MMIO_INSTRUCTION 1
+#define EXIT_REASON_PAL_CALL 2
+#define EXIT_REASON_SAL_CALL 3
+#define EXIT_REASON_SWITCH_RR6 4
+#define EXIT_REASON_VM_DESTROY 5
+#define EXIT_REASON_EXTERNAL_INTERRUPT 6
+#define EXIT_REASON_IPI 7
+#define EXIT_REASON_PTC_G 8
+
+/*Define vmm address space and vm data space.*/
+#define KVM_VMM_SIZE (16UL<<20)
+#define KVM_VMM_SHIFT 24
+#define KVM_VMM_BASE 0xD000000000000000UL
+#define VMM_SIZE (8UL<<20)
+
+/*
+ * Define vm_buffer, used by PAL Services, base address.
+ * Note: vm_buffer lives in the VMM block; its size must be < 8M.
+ */
+#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
+#define KVM_VM_BUFFER_SIZE (8UL<<20)
+
+/*Define Virtual machine data layout.*/
+#define KVM_VM_DATA_SHIFT 24
+#define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT)
+#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE)
+
+
+#define KVM_P2M_BASE KVM_VM_DATA_BASE
+#define KVM_P2M_OFS 0
+#define KVM_P2M_SIZE (8UL << 20)
+
+#define KVM_VHPT_BASE (KVM_P2M_BASE + KVM_P2M_SIZE)
+#define KVM_VHPT_OFS KVM_P2M_SIZE
+#define KVM_VHPT_BLOCK_SIZE (2UL << 20)
+#define VHPT_SHIFT 18
+#define VHPT_SIZE (1UL << VHPT_SHIFT)
+#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5))
+
+#define KVM_VTLB_BASE (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
+#define KVM_VTLB_OFS (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
+#define KVM_VTLB_BLOCK_SIZE (1UL<<20)
+#define VTLB_SHIFT 17
+#define VTLB_SIZE (1UL<<VTLB_SHIFT)
+#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5))
+
+#define KVM_VPD_BASE (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
+#define KVM_VPD_OFS (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
+#define KVM_VPD_BLOCK_SIZE (2UL<<20)
+#define VPD_SHIFT 16
+#define VPD_SIZE (1UL<<VPD_SHIFT)
+
+#define KVM_VCPU_BASE (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
+#define KVM_VCPU_OFS (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
+#define KVM_VCPU_BLOCK_SIZE (2UL<<20)
+#define VCPU_SHIFT 18
+#define VCPU_SIZE (1UL<<VCPU_SHIFT)
+#define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
+
+#define KVM_VM_BASE (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
+#define KVM_VM_OFS (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
+#define KVM_VM_BLOCK_SIZE (1UL<<19)
+
+#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
+#define KVM_MEM_DIRTY_LOG_OFS (KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
+#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19)
+
+/* Get vpd, vhpt, tlb, vcpu, base*/
+#define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE)
+#define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE)
+#define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE)
+#define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE)
+
+/*IO section definitions*/
+#define IOREQ_READ 1
+#define IOREQ_WRITE 0
+
+#define STATE_IOREQ_NONE 0
+#define STATE_IOREQ_READY 1
+#define STATE_IOREQ_INPROCESS 2
+#define STATE_IORESP_READY 3
+
+/*Guest Physical address layout.*/
+#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */
+#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */
+#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */
+#define GPFN_PIB (3UL << 60) /* PIB base */
+#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */
+#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */
+#define GPFN_GFW (6UL << 60) /* Guest Firmware */
+#define GPFN_HIGH_MMIO (7UL << 60) /* High MMIO range */
+
+#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */
+#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */
+#define INVALID_MFN (~0UL)
+#define MEM_G (1UL << 30)
+#define MEM_M (1UL << 20)
+#define MMIO_START (3 * MEM_G)
+#define MMIO_SIZE (512 * MEM_M)
+#define VGA_IO_START 0xA0000UL
+#define VGA_IO_SIZE 0x20000
+#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
+#define LEGACY_IO_SIZE (64 * MEM_M)
+#define IO_SAPIC_START 0xfec00000UL
+#define IO_SAPIC_SIZE 0x100000
+#define PIB_START 0xfee00000UL
+#define PIB_SIZE 0x200000
+#define GFW_START (4 * MEM_G - 16 * MEM_M)
+#define GFW_SIZE (16 * MEM_M)
+
+/*Deliver mode, defined for ioapic.c*/
+#define dest_Fixed IOSAPIC_FIXED
+#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY
+
+#define NMI_VECTOR 2
+#define ExtINT_VECTOR 0
+#define NULL_VECTOR (-1)
+#define IA64_SPURIOUS_INT_VECTOR 0x0f
+
+#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
+
+/*
+ *Delivery mode
+ */
+#define SAPIC_DELIV_SHIFT 8
+#define SAPIC_FIXED 0x0
+#define SAPIC_LOWEST_PRIORITY 0x1
+#define SAPIC_PMI 0x2
+#define SAPIC_NMI 0x4
+#define SAPIC_INIT 0x5
+#define SAPIC_EXTINT 0x7
+
+/*
+ * vcpu->requests bit members for arch
+ */
+#define KVM_REQ_PTC_G 32
+#define KVM_REQ_RESUME 33
+
+#define KVM_PAGES_PER_HPAGE 1
+
+struct kvm;
+struct kvm_vcpu;
+struct kvm_guest_debug{
+};
+
+struct kvm_mmio_req {
+ uint64_t addr; /* physical address */
+ uint64_t size; /* size in bytes */
+ uint64_t data; /* data (or paddr of data) */
+ uint8_t state:4;
+ uint8_t dir:1; /* 1=read, 0=write */
+};
+
+/*Pal data struct */
+struct kvm_pal_call{
+ /*In area*/
+ uint64_t gr28;
+ uint64_t gr29;
+ uint64_t gr30;
+ uint64_t gr31;
+ /*Out area*/
+ struct ia64_pal_retval ret;
+};
+
+/* Sal data structure */
+struct kvm_sal_call{
+ /*In area*/
+ uint64_t in0;
+ uint64_t in1;
+ uint64_t in2;
+ uint64_t in3;
+ uint64_t in4;
+ uint64_t in5;
+ uint64_t in6;
+ uint64_t in7;
+ struct sal_ret_values ret;
+};
+
+/*Guest change rr6*/
+struct kvm_switch_rr6 {
+ uint64_t old_rr;
+ uint64_t new_rr;
+};
+
+union ia64_ipi_a{
+ unsigned long val;
+ struct {
+ unsigned long rv : 3;
+ unsigned long ir : 1;
+ unsigned long eid : 8;
+ unsigned long id : 8;
+ unsigned long ib_base : 44;
+ };
+};
+
+union ia64_ipi_d {
+ unsigned long val;
+ struct {
+ unsigned long vector : 8;
+ unsigned long dm : 3;
+ unsigned long ig : 53;
+ };
+};
+
+/*ipi check exit data*/
+struct kvm_ipi_data{
+ union ia64_ipi_a addr;
+ union ia64_ipi_d data;
+};
+
+/*global purge data*/
+struct kvm_ptc_g {
+ unsigned long vaddr;
+ unsigned long rr;
+ unsigned long ps;
+ struct kvm_vcpu *vcpu;
+};
+
+/*Exit control data */
+struct exit_ctl_data{
+ uint32_t exit_reason;
+ uint32_t vm_status;
+ union {
+ struct kvm_mmio_req ioreq;
+ struct kvm_pal_call pal_data;
+ struct kvm_sal_call sal_data;
+ struct kvm_switch_rr6 rr_data;
+ struct kvm_ipi_data ipi_data;
+ struct kvm_ptc_g ptc_g_data;
+ } u;
+};
+
+union pte_flags {
+ unsigned long val;
+ struct {
+ unsigned long p : 1; /*0 */
+ unsigned long : 1; /* 1 */
+ unsigned long ma : 3; /* 2-4 */
+ unsigned long a : 1; /* 5 */
+ unsigned long d : 1; /* 6 */
+ unsigned long pl : 2; /* 7-8 */
+ unsigned long ar : 3; /* 9-11 */
+ unsigned long ppn : 38; /* 12-49 */
+ unsigned long : 2; /* 50-51 */
+ unsigned long ed : 1; /* 52 */
+ };
+};
+
+union ia64_pta {
+ unsigned long val;
+ struct {
+ unsigned long ve : 1;
+ unsigned long reserved0 : 1;
+ unsigned long size : 6;
+ unsigned long vf : 1;
+ unsigned long reserved1 : 6;
+ unsigned long base : 49;
+ };
+};
+
+struct thash_cb {
+ /* THASH base information */
+ struct thash_data *hash; /* hash table pointer */
+ union ia64_pta pta;
+ int num;
+};
+
+struct kvm_vcpu_stat {
+};
+
+struct kvm_vcpu_arch {
+ int launched;
+ int last_exit;
+ int last_run_cpu;
+ int vmm_tr_slot;
+ int vm_tr_slot;
+
+#define KVM_MP_STATE_RUNNABLE 0
+#define KVM_MP_STATE_UNINITIALIZED 1
+#define KVM_MP_STATE_INIT_RECEIVED 2
+#define KVM_MP_STATE_HALTED 3
+ int mp_state;
+
+#define MAX_PTC_G_NUM 3
+ int ptc_g_count;
+ struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
+
+ /*halt timer to wake up sleepy vcpus*/
+ struct hrtimer hlt_timer;
+ long ht_active;
+
+ struct kvm_lapic *apic; /* kernel irqchip context */
+ struct vpd *vpd;
+
+ /* Exit data for vmm_transition*/
+ struct exit_ctl_data exit_data;
+
+ cpumask_t cache_coherent_map;
+
+ unsigned long vmm_rr;
+ unsigned long host_rr6;
+ unsigned long psbits[8];
+ unsigned long cr_iipa;
+ unsigned long cr_isr;
+ unsigned long vsa_base;
+ unsigned long dirty_log_lock_pa;
+ unsigned long __gp;
+ /* TR and TC. */
+ struct thash_data itrs[NITRS];
+ struct thash_data dtrs[NDTRS];
+ /* Bit is set if there is a tr/tc for the region. */
+ unsigned char itr_regions;
+ unsigned char dtr_regions;
+ unsigned char tc_regions;
+ /* purge all */
+ unsigned long ptce_base;
+ unsigned long ptce_count[2];
+ unsigned long ptce_stride[2];
+ /* itc/itm */
+ unsigned long last_itc;
+ long itc_offset;
+ unsigned long itc_check;
+ unsigned long timer_check;
+ unsigned long timer_pending;
+
+ unsigned long vrr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long insvc[4]; /* Interrupt in service. */
+ unsigned long xtp;
+
+ unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_saved_rr0; /* from kvm_arch */
+ unsigned long metaphysical_saved_rr4; /* from kvm_arch */
+ unsigned long fp_psr; /* used for lazy float registers */
+ unsigned long saved_gp;
+ /* for physical emulation */
+ int mode_flags;
+ struct thash_cb vtlb;
+ struct thash_cb vhpt;
+ char irq_check;
+ char irq_new_pending;
+
+ unsigned long opcode;
+ unsigned long cause;
+ union context host;
+ union context guest;
+};
+
+struct kvm_vm_stat {
+ u64 remote_tlb_flush;
+};
+
+struct kvm_sal_data {
+ unsigned long boot_ip;
+ unsigned long boot_gp;
+};
+
+struct kvm_arch {
+ unsigned long vm_base;
+ unsigned long metaphysical_rr0;
+ unsigned long metaphysical_rr4;
+ unsigned long vmm_init_rr;
+ unsigned long vhpt_base;
+ unsigned long vtlb_base;
+ unsigned long vpd_base;
+ spinlock_t dirty_log_lock;
+ struct kvm_ioapic *vioapic;
+ struct kvm_vm_stat stat;
+ struct kvm_sal_data rdv_sal_data;
+};
+
+union cpuid3_t {
+ u64 value;
+ struct {
+ u64 number : 8;
+ u64 revision : 8;
+ u64 model : 8;
+ u64 family : 8;
+ u64 archrev : 8;
+ u64 rv : 24;
+ };
+};
+
+struct kvm_pt_regs {
+ /* The following registers are saved by SAVE_MIN: */
+ unsigned long b6; /* scratch */
+ unsigned long b7; /* scratch */
+
+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+ unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+ unsigned long r8; /* scratch (return value register 0) */
+ unsigned long r9; /* scratch (return value register 1) */
+ unsigned long r10; /* scratch (return value register 2) */
+ unsigned long r11; /* scratch (return value register 3) */
+
+ unsigned long cr_ipsr; /* interrupted task's psr */
+ unsigned long cr_iip; /* interrupted task's instruction pointer */
+ unsigned long cr_ifs; /* interrupted task's function state */
+
+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+ unsigned long ar_pfs; /* prev function state */
+ unsigned long ar_rsc; /* RSE configuration */
+ /* The following two are valid only if cr_ipsr.cpl > 0: */
+ unsigned long ar_rnat; /* RSE NaT */
+ unsigned long ar_bspstore; /* RSE bspstore */
+
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+ unsigned long b0; /* return pointer (bp) */
+ unsigned long loadrs; /* size of dirty partition << 16 */
+
+ unsigned long r1; /* the gp pointer */
+ unsigned long r12; /* interrupted task's memory stack pointer */
+ unsigned long r13; /* thread pointer */
+
+ unsigned long ar_fpsr; /* floating point status (preserved) */
+ unsigned long r15; /* scratch */
+
+ /* The remaining registers are NOT saved for system calls. */
+ unsigned long r14; /* scratch */
+ unsigned long r2; /* scratch */
+ unsigned long r3; /* scratch */
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
+ unsigned long ar_ccv; /* compare/exchange value (scratch) */
+
+ /*
+ * Floating point registers that the kernel considers scratch:
+ */
+ struct ia64_fpreg f6; /* scratch */
+ struct ia64_fpreg f7; /* scratch */
+ struct ia64_fpreg f8; /* scratch */
+ struct ia64_fpreg f9; /* scratch */
+ struct ia64_fpreg f10; /* scratch */
+ struct ia64_fpreg f11; /* scratch */
+
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+ unsigned long eml_unat; /* used for emulating instruction */
+ unsigned long pad0; /* alignment pad */
+};
+
+static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
+{
+ return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+}
+
+typedef int kvm_vmm_entry(void);
+typedef void kvm_tramp_entry(union context *host, union context *guest);
+
+struct kvm_vmm_info{
+ struct module *module;
+ kvm_vmm_entry *vmm_entry;
+ kvm_tramp_entry *tramp_entry;
+ unsigned long vmm_ivt;
+};
+
+int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+void kvm_sal_emul(struct kvm_vcpu *vcpu);
+
+#endif
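The KVM_VM_DATA_* macros in the header above carve one fixed 16 MiB region (KVM_VM_DATA_SIZE, 1 << 24) into back-to-back areas, each *_BASE being the previous base plus the previous block size. A hedged compile-time check that the blocks tile the region exactly (recomputed from the sizes above; no new definitions implied by the patch):

/* 8M p2m + 2M vhpt + 1M vtlb + 2M vpd + 2M vcpu + 512K vm + 512K dirty log */
#define VM_DATA_TOTAL ((8UL << 20) + (2UL << 20) + (1UL << 20) + \
		       (2UL << 20) + (2UL << 20) + (1UL << 19) + (1UL << 19))

/* Old-school static assert: the array size goes negative if the sum is off. */
typedef char vm_data_layout_check[(VM_DATA_TOTAL == (1UL << 24)) ? 1 : -1];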
diff --git a/include/asm-ia64/kvm_para.h b/include/asm-ia64/kvm_para.h
new file mode 100644
index 00000000000..9f9796bb344
--- /dev/null
+++ b/include/asm-ia64/kvm_para.h
@@ -0,0 +1,29 @@
+#ifndef __IA64_KVM_PARA_H
+#define __IA64_KVM_PARA_H
+
+/*
+ * asm-ia64/kvm_para.h
+ *
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 741f7ecb986..6aff126fc07 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -119,6 +119,69 @@ struct ia64_psr {
__u64 reserved4 : 19;
};
+union ia64_isr {
+ __u64 val;
+ struct {
+ __u64 code : 16;
+ __u64 vector : 8;
+ __u64 reserved1 : 8;
+ __u64 x : 1;
+ __u64 w : 1;
+ __u64 r : 1;
+ __u64 na : 1;
+ __u64 sp : 1;
+ __u64 rs : 1;
+ __u64 ir : 1;
+ __u64 ni : 1;
+ __u64 so : 1;
+ __u64 ei : 2;
+ __u64 ed : 1;
+ __u64 reserved2 : 20;
+ };
+};
+
+union ia64_lid {
+ __u64 val;
+ struct {
+ __u64 rv : 16;
+ __u64 eid : 8;
+ __u64 id : 8;
+ __u64 ig : 32;
+ };
+};
+
+union ia64_tpr {
+ __u64 val;
+ struct {
+ __u64 ig0 : 4;
+ __u64 mic : 4;
+ __u64 rsv : 8;
+ __u64 mmi : 1;
+ __u64 ig1 : 47;
+ };
+};
+
+union ia64_itir {
+ __u64 val;
+ struct {
+ __u64 rv3 : 2; /* 0-1 */
+ __u64 ps : 6; /* 2-7 */
+ __u64 key : 24; /* 8-31 */
+ __u64 rv4 : 32; /* 32-63 */
+ };
+};
+
+union ia64_rr {
+ __u64 val;
+ struct {
+ __u64 ve : 1; /* enable hw walker */
+ __u64 reserved0: 1; /* reserved */
+ __u64 ps : 6; /* log page size */
+ __u64 rid : 24; /* region id */
+ __u64 reserved1: 32; /* reserved */
+ };
+};
+
/*
* CPU type, hardware bug flags, and per-CPU state. Frequently used
* state comes earlier:
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index ec75ce4cdb8..c2bd126c3b4 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -591,6 +591,11 @@ static inline int __ilog2(unsigned long x)
return 63 - lz;
}
+static inline unsigned long __fls(unsigned long x)
+{
+ return __ilog2(x);
+}
+
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
/*
diff --git a/include/asm-mips/mach-au1x00/au1xxx_ide.h b/include/asm-mips/mach-au1x00/au1xxx_ide.h
index 89655c0cdcd..b493a5e46c6 100644
--- a/include/asm-mips/mach-au1x00/au1xxx_ide.h
+++ b/include/asm-mips/mach-au1x00/au1xxx_ide.h
@@ -70,7 +70,6 @@ typedef struct
ide_hwif_t *hwif;
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
ide_drive_t *drive;
- u8 white_list, black_list;
struct dbdma_cmd *dma_table_cpu;
dma_addr_t dma_table_dma;
#endif
@@ -81,47 +80,6 @@ typedef struct
#endif
} _auide_hwif;
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-/* HD white list */
-static const struct drive_list_entry dma_white_list [] = {
-/*
- * Hitachi
- */
- { "HITACHI_DK14FA-20" , NULL },
- { "HTS726060M9AT00" , NULL },
-/*
- * Maxtor
- */
- { "Maxtor 6E040L0" , NULL },
- { "Maxtor 6Y080P0" , NULL },
- { "Maxtor 6Y160P0" , NULL },
-/*
- * Seagate
- */
- { "ST3120026A" , NULL },
- { "ST320014A" , NULL },
- { "ST94011A" , NULL },
- { "ST340016A" , NULL },
-/*
- * Western Digital
- */
- { "WDC WD400UE-00HCT0" , NULL },
- { "WDC WD400JB-00JJC0" , NULL },
- { NULL , NULL }
-};
-
-/* HD black list */
-static const struct drive_list_entry dma_black_list [] = {
-/*
- * Western Digital
- */
- { "WDC WD100EB-00CGH0" , NULL },
- { "WDC WD200BB-00AUA1" , NULL },
- { "WDC AC24300L" , NULL },
- { NULL , NULL }
-};
-#endif
-
/*******************************************************************************
* PIO Mode timing calculation : *
* *
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index f8eebcbad01..7a6ea10bd23 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -210,6 +210,7 @@ static __inline__ int fls(int x)
return ret;
}
+#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index a99a7492947..897eade3afb 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -313,6 +313,11 @@ static __inline__ int fls(unsigned int x)
return 32 - lz;
}
+static __inline__ unsigned long __fls(unsigned long x)
+{
+ return __ilog2(x);
+}
+
/*
* 64-bit can do this using one cntlzd (count leading zeroes doubleword)
* instruction; for 32-bit we use the generic version, which does two
diff --git a/include/asm-powerpc/kvm.h b/include/asm-powerpc/kvm.h
index d1b530fbf8d..f993e4198d5 100644
--- a/include/asm-powerpc/kvm.h
+++ b/include/asm-powerpc/kvm.h
@@ -1,6 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
#ifndef __LINUX_KVM_POWERPC_H
#define __LINUX_KVM_POWERPC_H
-/* powerpc does not support KVM */
+#include <asm/types.h>
+
+struct kvm_regs {
+ __u64 pc;
+ __u64 cr;
+ __u64 ctr;
+ __u64 lr;
+ __u64 xer;
+ __u64 msr;
+ __u64 srr0;
+ __u64 srr1;
+ __u64 pid;
+
+ __u64 sprg0;
+ __u64 sprg1;
+ __u64 sprg2;
+ __u64 sprg3;
+ __u64 sprg4;
+ __u64 sprg5;
+ __u64 sprg6;
+ __u64 sprg7;
+
+ __u64 gpr[32];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+ __u64 fpr[32];
+};
-#endif
+#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/include/asm-powerpc/kvm_asm.h b/include/asm-powerpc/kvm_asm.h
new file mode 100644
index 00000000000..2197764796d
--- /dev/null
+++ b/include/asm-powerpc/kvm_asm.h
@@ -0,0 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_ASM_H__
+#define __POWERPC_KVM_ASM_H__
+
+/* IVPR must be 64KiB-aligned. */
+#define VCPU_SIZE_ORDER 4
+#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
+#define VCPU_TLB_PGSZ PPC44x_TLB_64K
+#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
+
+#define BOOKE_INTERRUPT_CRITICAL 0
+#define BOOKE_INTERRUPT_MACHINE_CHECK 1
+#define BOOKE_INTERRUPT_DATA_STORAGE 2
+#define BOOKE_INTERRUPT_INST_STORAGE 3
+#define BOOKE_INTERRUPT_EXTERNAL 4
+#define BOOKE_INTERRUPT_ALIGNMENT 5
+#define BOOKE_INTERRUPT_PROGRAM 6
+#define BOOKE_INTERRUPT_FP_UNAVAIL 7
+#define BOOKE_INTERRUPT_SYSCALL 8
+#define BOOKE_INTERRUPT_AP_UNAVAIL 9
+#define BOOKE_INTERRUPT_DECREMENTER 10
+#define BOOKE_INTERRUPT_FIT 11
+#define BOOKE_INTERRUPT_WATCHDOG 12
+#define BOOKE_INTERRUPT_DTLB_MISS 13
+#define BOOKE_INTERRUPT_ITLB_MISS 14
+#define BOOKE_INTERRUPT_DEBUG 15
+#define BOOKE_MAX_INTERRUPT 15
+
+#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+
+#define RESUME_GUEST 0
+#define RESUME_GUEST_NV RESUME_FLAG_NV
+#define RESUME_HOST RESUME_FLAG_HOST
+#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV)
+
+#endif /* __POWERPC_KVM_ASM_H__ */
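The RESUME_* codes above pack two independent decisions into one return value: RESUME_FLAG_NV (bit 0) requests a reload of guest nonvolatile registers, RESUME_FLAG_HOST (bit 1) requests an exit to the host. A hedged sketch of the consuming side:

/* r is one of RESUME_GUEST, RESUME_GUEST_NV, RESUME_HOST, RESUME_HOST_NV. */
static void act_on_resume_code(int r)
{
	if (r & RESUME_FLAG_NV)
		reload_guest_nonvolatiles();	/* hypothetical helper */

	if (r & RESUME_FLAG_HOST)
		exit_to_host();			/* hypothetical helper */
	else
		reenter_guest();		/* hypothetical helper */
}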
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h
new file mode 100644
index 00000000000..04ffbb8e0a3
--- /dev/null
+++ b/include/asm-powerpc/kvm_host.h
@@ -0,0 +1,152 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_HOST_H__
+#define __POWERPC_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <asm/kvm_asm.h>
+
+#define KVM_MAX_VCPUS 1
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+/* We don't currently support large pages. */
+#define KVM_PAGES_PER_HPAGE (1<<31)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+ u32 sum_exits;
+ u32 mmio_exits;
+ u32 dcr_exits;
+ u32 signal_exits;
+ u32 light_exits;
+ /* Account for special types of light exits: */
+ u32 itlb_real_miss_exits;
+ u32 itlb_virt_miss_exits;
+ u32 dtlb_real_miss_exits;
+ u32 dtlb_virt_miss_exits;
+ u32 syscall_exits;
+ u32 isi_exits;
+ u32 dsi_exits;
+ u32 emulated_inst_exits;
+ u32 dec_exits;
+ u32 ext_intr_exits;
+};
+
+struct tlbe {
+ u32 tid; /* Only the low 8 bits are used. */
+ u32 word0;
+ u32 word1;
+ u32 word2;
+};
+
+struct kvm_arch {
+};
+
+struct kvm_vcpu_arch {
+ /* Unmodified copy of the guest's TLB. */
+ struct tlbe guest_tlb[PPC44x_TLB_SIZE];
+ /* TLB that's actually used when the guest is running. */
+ struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
+ /* Pages which are referenced in the shadow TLB. */
+ struct page *shadow_pages[PPC44x_TLB_SIZE];
+ /* Copy of the host's TLB. */
+ struct tlbe host_tlb[PPC44x_TLB_SIZE];
+
+ u32 host_stack;
+ u32 host_pid;
+
+ u64 fpr[32];
+ u32 gpr[32];
+
+ u32 pc;
+ u32 cr;
+ u32 ctr;
+ u32 lr;
+ u32 xer;
+
+ u32 msr;
+ u32 mmucr;
+ u32 sprg0;
+ u32 sprg1;
+ u32 sprg2;
+ u32 sprg3;
+ u32 sprg4;
+ u32 sprg5;
+ u32 sprg6;
+ u32 sprg7;
+ u32 srr0;
+ u32 srr1;
+ u32 csrr0;
+ u32 csrr1;
+ u32 dsrr0;
+ u32 dsrr1;
+ u32 dear;
+ u32 esr;
+ u32 dec;
+ u32 decar;
+ u32 tbl;
+ u32 tbu;
+ u32 tcr;
+ u32 tsr;
+ u32 ivor[16];
+ u32 ivpr;
+ u32 pir;
+ u32 pid;
+ u32 pvr;
+ u32 ccr0;
+ u32 ccr1;
+ u32 dbcr0;
+ u32 dbcr1;
+
+ u32 last_inst;
+ u32 fault_dear;
+ u32 fault_esr;
+ gpa_t paddr_accessed;
+
+ u8 io_gpr; /* GPR used as IO source/target */
+ u8 mmio_is_bigendian;
+ u8 dcr_needed;
+ u8 dcr_is_write;
+
+ u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
+
+ struct timer_list dec_timer;
+ unsigned long pending_exceptions;
+};
+
+struct kvm_guest_debug {
+ int enabled;
+ unsigned long bp[4];
+ int singlestep;
+};
+
+#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/include/asm-powerpc/kvm_para.h b/include/asm-powerpc/kvm_para.h
new file mode 100644
index 00000000000..2d48f6a63d0
--- /dev/null
+++ b/include/asm-powerpc/kvm_para.h
@@ -0,0 +1,37 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PARA_H__
+#define __POWERPC_KVM_PARA_H__
+
+#ifdef __KERNEL__
+
+static inline int kvm_para_available(void)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h
new file mode 100644
index 00000000000..7ac820308a7
--- /dev/null
+++ b/include/asm-powerpc/kvm_ppc.h
@@ -0,0 +1,88 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PPC_H__
+#define __POWERPC_KVM_PPC_H__
+
+/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
+ * dependencies. */
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+
+struct kvm_tlb {
+ struct tlbe guest_tlb[PPC44x_TLB_SIZE];
+ struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
+};
+
+enum emulation_result {
+ EMULATE_DONE, /* no further processing */
+ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
+ EMULATE_DO_DCR, /* kvm_run filled with DCR request */
+ EMULATE_FAIL, /* can't emulate this instruction */
+};
+
+extern const unsigned char exception_priority[];
+extern const unsigned char priority_exception[];
+
+extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern char kvmppc_handlers_start[];
+extern unsigned long kvmppc_handler_len;
+
+extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_bigendian);
+extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ u32 val, unsigned int bytes, int is_bigendian);
+
+extern int kvmppc_emulate_instruction(struct kvm_run *run,
+ struct kvm_vcpu *vcpu);
+
+extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
+ u64 asid, u32 flags);
+extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid);
+extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+
+extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
+
+static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
+{
+ unsigned int priority = exception_priority[exception];
+ set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
+{
+ unsigned int priority = exception_priority[exception];
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+ if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
+ kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+ vcpu->arch.msr = new_msr;
+}
+
+#endif /* __POWERPC_KVM_PPC_H__ */
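kvmppc_queue_exception() above records each pending exception as a bit in pending_exceptions, indexed by priority, so delivery can pick the most urgent one with a single bit scan. A hedged sketch of the delivery side (assuming, as the bit layout suggests, that a lower bit number means higher priority; deliver_to_guest() is hypothetical):

static void deliver_highest_pending(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	if (!*pending)
		return;

	priority = __ffs(*pending);	/* lowest set bit == most urgent */
	deliver_to_guest(vcpu, priority_exception[priority]);
	clear_bit(priority, pending);
}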
diff --git a/include/asm-powerpc/mmu-44x.h b/include/asm-powerpc/mmu-44x.h
index c8b02d97f75..a825524c981 100644
--- a/include/asm-powerpc/mmu-44x.h
+++ b/include/asm-powerpc/mmu-44x.h
@@ -53,6 +53,8 @@
#ifndef __ASSEMBLY__
+extern unsigned int tlb_44x_hwater;
+
typedef struct {
unsigned long id;
unsigned long vdso_base;
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild
index e92b429d2be..13c9805349f 100644
--- a/include/asm-s390/Kbuild
+++ b/include/asm-s390/Kbuild
@@ -7,6 +7,7 @@ header-y += tape390.h
header-y += ucontext.h
header-y += vtoc.h
header-y += zcrypt.h
+header-y += kvm.h
unifdef-y += cmb.h
unifdef-y += debug.h
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 965394e6945..b4eb24ab5af 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -769,6 +769,7 @@ static inline int sched_find_first_bit(unsigned long *b)
}
#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
diff --git a/include/asm-s390/kvm.h b/include/asm-s390/kvm.h
index 573f2a35138..d74002f9579 100644
--- a/include/asm-s390/kvm.h
+++ b/include/asm-s390/kvm.h
@@ -1,6 +1,45 @@
#ifndef __LINUX_KVM_S390_H
#define __LINUX_KVM_S390_H
-/* s390 does not support KVM */
+/*
+ * asm-s390/kvm.h - KVM s390 specific structures and definitions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+#include <asm/types.h>
+
+/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
+struct kvm_pic_state {
+ /* no PIC for s390 */
+};
+
+struct kvm_ioapic_state {
+ /* no IOAPIC for s390 */
+};
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+ /* general purpose regs for s390 */
+ __u64 gprs[16];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+ __u32 acrs[16];
+ __u64 crs[16];
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+ __u32 fpc;
+ __u64 fprs[16];
+};
#endif
diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h
new file mode 100644
index 00000000000..f8204a4f2e0
--- /dev/null
+++ b/include/asm-s390/kvm_host.h
@@ -0,0 +1,234 @@
+/*
+ * asm-s390/kvm_host.h - definition for kernel virtual machines on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+
+#ifndef ASM_KVM_HOST_H
+#define ASM_KVM_HOST_H
+#include <linux/kvm_host.h>
+#include <asm/debug.h>
+
+#define KVM_MAX_VCPUS 64
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+struct kvm_guest_debug {
+};
+
+struct sca_entry {
+ atomic_t scn;
+ __u64 reserved;
+ __u64 sda;
+ __u64 reserved2[2];
+} __attribute__((packed));
+
+
+struct sca_block {
+ __u64 ipte_control;
+ __u64 reserved[5];
+ __u64 mcn;
+ __u64 reserved2;
+ struct sca_entry cpu[64];
+} __attribute__((packed));
+
+#define KVM_PAGES_PER_HPAGE 256
+
+#define CPUSTAT_HOST 0x80000000
+#define CPUSTAT_WAIT 0x10000000
+#define CPUSTAT_ECALL_PEND 0x08000000
+#define CPUSTAT_STOP_INT 0x04000000
+#define CPUSTAT_IO_INT 0x02000000
+#define CPUSTAT_EXT_INT 0x01000000
+#define CPUSTAT_RUNNING 0x00800000
+#define CPUSTAT_RETAINED 0x00400000
+#define CPUSTAT_TIMING_SUB 0x00020000
+#define CPUSTAT_SIE_SUB 0x00010000
+#define CPUSTAT_RRF 0x00008000
+#define CPUSTAT_SLSV 0x00004000
+#define CPUSTAT_SLSR 0x00002000
+#define CPUSTAT_ZARCH 0x00000800
+#define CPUSTAT_MCDS 0x00000100
+#define CPUSTAT_SM 0x00000080
+#define CPUSTAT_G 0x00000008
+#define CPUSTAT_J 0x00000002
+#define CPUSTAT_P 0x00000001
+
+struct sie_block {
+ atomic_t cpuflags; /* 0x0000 */
+ __u32 prefix; /* 0x0004 */
+ __u8 reserved8[32]; /* 0x0008 */
+ __u64 cputm; /* 0x0028 */
+ __u64 ckc; /* 0x0030 */
+ __u64 epoch; /* 0x0038 */
+ __u8 reserved40[4]; /* 0x0040 */
+#define LCTL_CR0 0x8000
+ __u16 lctl; /* 0x0044 */
+ __s16 icpua; /* 0x0046 */
+ __u32 ictl; /* 0x0048 */
+ __u32 eca; /* 0x004c */
+ __u8 icptcode; /* 0x0050 */
+ __u8 reserved51; /* 0x0051 */
+ __u16 ihcpu; /* 0x0052 */
+ __u8 reserved54[2]; /* 0x0054 */
+ __u16 ipa; /* 0x0056 */
+ __u32 ipb; /* 0x0058 */
+ __u32 scaoh; /* 0x005c */
+ __u8 reserved60; /* 0x0060 */
+ __u8 ecb; /* 0x0061 */
+ __u8 reserved62[2]; /* 0x0062 */
+ __u32 scaol; /* 0x0064 */
+ __u8 reserved68[4]; /* 0x0068 */
+ __u32 todpr; /* 0x006c */
+ __u8 reserved70[16]; /* 0x0070 */
+ __u64 gmsor; /* 0x0080 */
+ __u64 gmslm; /* 0x0088 */
+ psw_t gpsw; /* 0x0090 */
+ __u64 gg14; /* 0x00a0 */
+ __u64 gg15; /* 0x00a8 */
+ __u8 reservedb0[30]; /* 0x00b0 */
+ __u16 iprcc; /* 0x00ce */
+ __u8 reservedd0[48]; /* 0x00d0 */
+ __u64 gcr[16]; /* 0x0100 */
+ __u64 gbea; /* 0x0180 */
+ __u8 reserved188[120]; /* 0x0188 */
+} __attribute__((packed));
+
+struct kvm_vcpu_stat {
+ u32 exit_userspace;
+ u32 exit_external_request;
+ u32 exit_external_interrupt;
+ u32 exit_stop_request;
+ u32 exit_validity;
+ u32 exit_instruction;
+ u32 instruction_lctl;
+ u32 instruction_lctg;
+ u32 exit_program_interruption;
+ u32 exit_instr_and_program;
+ u32 deliver_emergency_signal;
+ u32 deliver_service_signal;
+ u32 deliver_virtio_interrupt;
+ u32 deliver_stop_signal;
+ u32 deliver_prefix_signal;
+ u32 deliver_restart_signal;
+ u32 deliver_program_int;
+ u32 exit_wait_state;
+ u32 instruction_stidp;
+ u32 instruction_spx;
+ u32 instruction_stpx;
+ u32 instruction_stap;
+ u32 instruction_storage_key;
+ u32 instruction_stsch;
+ u32 instruction_chsc;
+ u32 instruction_stsi;
+ u32 instruction_stfl;
+ u32 instruction_sigp_sense;
+ u32 instruction_sigp_emergency;
+ u32 instruction_sigp_stop;
+ u32 instruction_sigp_arch;
+ u32 instruction_sigp_prefix;
+ u32 instruction_sigp_restart;
+ u32 diagnose_44;
+};
+
+struct io_info {
+ __u16 subchannel_id; /* 0x0b8 */
+ __u16 subchannel_nr; /* 0x0ba */
+ __u32 io_int_parm; /* 0x0bc */
+ __u32 io_int_word; /* 0x0c0 */
+};
+
+struct ext_info {
+ __u32 ext_params;
+ __u64 ext_params2;
+};
+
+#define PGM_OPERATION 0x01
+#define PGM_PRIVILEGED_OPERATION 0x02
+#define PGM_EXECUTE 0x03
+#define PGM_PROTECTION 0x04
+#define PGM_ADDRESSING 0x05
+#define PGM_SPECIFICATION 0x06
+#define PGM_DATA 0x07
+
+struct pgm_info {
+ __u16 code;
+};
+
+struct prefix_info {
+ __u32 address;
+};
+
+struct interrupt_info {
+ struct list_head list;
+ u64 type;
+ union {
+ struct io_info io;
+ struct ext_info ext;
+ struct pgm_info pgm;
+ struct prefix_info prefix;
+ };
+};
+
+/* for local_interrupt.action_flags */
+#define ACTION_STORE_ON_STOP 1
+#define ACTION_STOP_ON_STOP 2
+
+struct local_interrupt {
+ spinlock_t lock;
+ struct list_head list;
+ atomic_t active;
+ struct float_interrupt *float_int;
+ int timer_due; /* event indicator for waitqueue below */
+ wait_queue_head_t wq;
+ atomic_t *cpuflags;
+ unsigned int action_bits;
+};
+
+struct float_interrupt {
+ spinlock_t lock;
+ struct list_head list;
+ atomic_t active;
+ int next_rr_cpu;
+ unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)];
+ struct local_interrupt *local_int[64];
+};
+
+struct kvm_vcpu_arch {
+ struct sie_block *sie_block;
+ unsigned long guest_gprs[16];
+ s390_fp_regs host_fpregs;
+ unsigned int host_acrs[NUM_ACRS];
+ s390_fp_regs guest_fpregs;
+ unsigned int guest_acrs[NUM_ACRS];
+ struct local_interrupt local_int;
+ struct timer_list ckc_timer;
+ union {
+ cpuid_t cpu_id;
+ u64 stidp_data;
+ };
+};
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_arch {
+ unsigned long guest_origin;
+ unsigned long guest_memsize;
+ struct sca_block *sca;
+ debug_info_t *dbf;
+ struct float_interrupt float_int;
+};
+
+extern int sie64a(struct sie_block *, __u64 *);
+#endif
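
sie64a() is the assembly entry point that drops into SIE; a minimal sketch of the expected call site, assuming the kvm_vcpu_arch layout above (the surrounding loop and the exit handling are illustrative only):

    /* sketch: enter SIE with the guest register file */
    rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
    if (rc)
            handle_validity(vcpu);  /* assumed helper for a failed entry */
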
diff --git a/include/asm-s390/kvm_para.h b/include/asm-s390/kvm_para.h
new file mode 100644
index 00000000000..2c503796b61
--- /dev/null
+++ b/include/asm-s390/kvm_para.h
@@ -0,0 +1,150 @@
+/*
+ * asm-s390/kvm_para.h - definition for paravirtual devices on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef __S390_KVM_PARA_H
+#define __S390_KVM_PARA_H
+
+/*
+ * Hypercalls for KVM on s390. The calling convention is similar to the
+ * s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1
+ * as hypercall number and R7 as parameter 6. The return value is
+ * written to R2. We use the diagnose instruction as hypercall. To avoid
+ * conflicts with existing diagnoses for LPAR and z/VM, we do not use
+ * the instruction encoded number, but specify the number in R1 and
+ * use 0x500 as the KVM hypercall number.
+ */
+
+static inline long kvm_hypercall0(unsigned long nr)
+{
+ register unsigned long __nr asm("1") = nr;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr): "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
+ unsigned long p2)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
+ : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register unsigned long __p5 asm("6") = p5;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4), "d" (__p5) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5,
+ unsigned long p6)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register unsigned long __p5 asm("6") = p5;
+ register unsigned long __p6 asm("7") = p6;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
+ : "memory", "cc");
+ return __rc;
+}
+
+/* KVM on s390 always has paravirtualization enabled */
+static inline int kvm_para_available(void)
+{
+ return 1;
+}
+
+/* No feature bits are currently assigned for kvm on s390 */
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif /* __S390_KVM_PARA_H */
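
Guest code issues these calls through the wrappers above; a hedged usage sketch, where KVM_S390_VIRTIO_NOTIFY comes from kvm_virtio.h below and vq_config_addr is a placeholder for the queue's config address:

    /* sketch: kick the host after adding buffers to a virtqueue */
    if (kvm_para_available()) {
            long rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, vq_config_addr);
            if (rc < 0)
                    printk(KERN_WARNING "virtio notify failed: %ld\n", rc);
    }
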
diff --git a/include/asm-s390/kvm_virtio.h b/include/asm-s390/kvm_virtio.h
new file mode 100644
index 00000000000..5c871a990c2
--- /dev/null
+++ b/include/asm-s390/kvm_virtio.h
@@ -0,0 +1,53 @@
+/*
+ * kvm_virtio.h - definition for virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef __KVM_S390_VIRTIO_H
+#define __KVM_S390_VIRTIO_H
+
+#include <linux/types.h>
+
+struct kvm_device_desc {
+ /* The device type: console, network, disk etc. Type 0 terminates. */
+ __u8 type;
+ /* The number of virtqueues (first in config array) */
+ __u8 num_vq;
+ /*
+ * The number of bytes of feature bits. Multiply by 2: one for host
+ * features and one for guest acknowledgements.
+ */
+ __u8 feature_len;
+ /* The number of bytes of the config array after virtqueues. */
+ __u8 config_len;
+ /* A status byte, written by the Guest. */
+ __u8 status;
+ __u8 config[0];
+};
+
+/*
+ * This is how we expect the device configuration field for a virtqueue
+ * to be laid out in config space.
+ */
+struct kvm_vqconfig {
+ /* The token returned with an interrupt. Set by the guest. */
+ __u64 token;
+ /* The address of the virtio ring */
+ __u64 address;
+ /* The number of entries in the virtio_ring */
+ __u16 num;
+};
+
+#define KVM_S390_VIRTIO_NOTIFY 0
+#define KVM_S390_VIRTIO_RESET 1
+#define KVM_S390_VIRTIO_SET_STATUS 2
+
+#endif
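
Each descriptor is followed in config space by num_vq kvm_vqconfig entries, then 2 * feature_len bytes of feature bits, then config_len bytes of device config. A hedged helper computing the total footprint from the fields above (an assumption for illustration, not part of the patch):

    /* sketch: bytes occupied by a descriptor plus its trailing arrays */
    static size_t desc_size(const struct kvm_device_desc *d)
    {
            return sizeof(*d)
                    + d->num_vq * sizeof(struct kvm_vqconfig)
                    + d->feature_len * 2
                    + d->config_len;
    }
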
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 5de3efb3144..0bc51d52a89 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -381,27 +381,32 @@ struct _lowcore
/* whether the kernel died with panic() or not */
__u32 panic_magic; /* 0xe00 */
- __u8 pad13[0x1200-0xe04]; /* 0xe04 */
+ __u8 pad13[0x11b8-0xe04]; /* 0xe04 */
+
+ /* 64 bit extparam used for pfault, diag 250 etc */
+ __u64 ext_params2; /* 0x11B8 */
+
+ __u8 pad14[0x1200-0x11C0]; /* 0x11C0 */
/* System info area */
__u64 floating_pt_save_area[16]; /* 0x1200 */
__u64 gpregs_save_area[16]; /* 0x1280 */
__u32 st_status_fixed_logout[4]; /* 0x1300 */
- __u8 pad14[0x1318-0x1310]; /* 0x1310 */
+ __u8 pad15[0x1318-0x1310]; /* 0x1310 */
__u32 prefixreg_save_area; /* 0x1318 */
__u32 fpt_creg_save_area; /* 0x131c */
- __u8 pad15[0x1324-0x1320]; /* 0x1320 */
+ __u8 pad16[0x1324-0x1320]; /* 0x1320 */
__u32 tod_progreg_save_area; /* 0x1324 */
__u32 cpu_timer_save_area[2]; /* 0x1328 */
__u32 clock_comp_save_area[2]; /* 0x1330 */
- __u8 pad16[0x1340-0x1338]; /* 0x1338 */
+ __u8 pad17[0x1340-0x1338]; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
__u64 cregs_save_area[16]; /* 0x1380 */
/* align to the top of the prefix area */
- __u8 pad17[0x2000-0x1400]; /* 0x1400 */
+ __u8 pad18[0x2000-0x1400]; /* 0x1400 */
#endif /* !__s390x__ */
} __attribute__((packed)); /* End structure*/
diff --git a/include/asm-s390/mmu.h b/include/asm-s390/mmu.h
index 1698e29c5b2..5dd5e7b3476 100644
--- a/include/asm-s390/mmu.h
+++ b/include/asm-s390/mmu.h
@@ -7,6 +7,7 @@ typedef struct {
unsigned long asce_bits;
unsigned long asce_limit;
int noexec;
+ int pgstes;
} mm_context_t;
#endif
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index b5a34c6f91a..4c2fbf48c9c 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -20,7 +20,13 @@ static inline int init_new_context(struct task_struct *tsk,
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
- mm->context.noexec = s390_noexec;
+ if (current->mm->context.pgstes) {
+ mm->context.noexec = 0;
+ mm->context.pgstes = 1;
+ } else {
+ mm->context.noexec = s390_noexec;
+ mm->context.pgstes = 0;
+ }
mm->context.asce_limit = STACK_TOP_MAX;
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 65154dc9a9e..4c0698c0dda 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -30,6 +30,7 @@
*/
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
+#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>
@@ -258,6 +259,13 @@ extern char empty_zero_page[PAGE_SIZE];
* swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
*/
+/* Page status table bits for virtualization */
+#define RCP_PCL_BIT 55
+#define RCP_HR_BIT 54
+#define RCP_HC_BIT 53
+#define RCP_GR_BIT 50
+#define RCP_GC_BIT 49
+
#ifndef __s390x__
/* Bits in the segment table address-space-control-element */
@@ -513,6 +521,48 @@ static inline int pte_file(pte_t pte)
#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b) (pte_val(a) == pte_val(b))
+static inline void rcp_lock(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+ preempt_disable();
+ while (test_and_set_bit(RCP_PCL_BIT, pgste))
+ ;
+#endif
+}
+
+static inline void rcp_unlock(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+ clear_bit(RCP_PCL_BIT, pgste);
+ preempt_enable();
+#endif
+}
+
+/* forward declaration for SetPageUptodate in page-flags.h */
+static inline void page_clear_dirty(struct page *page);
+#include <linux/page-flags.h>
+
+static inline void ptep_rcp_copy(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ struct page *page = virt_to_page(pte_val(*ptep));
+ unsigned int skey;
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+
+ skey = page_get_storage_key(page_to_phys(page));
+ if (skey & _PAGE_CHANGED)
+ set_bit_simple(RCP_GC_BIT, pgste);
+ if (skey & _PAGE_REFERENCED)
+ set_bit_simple(RCP_GR_BIT, pgste);
+ if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
+ SetPageDirty(page);
+ if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
+ SetPageReferenced(page);
+#endif
+}
+
/*
* query functions pte_write/pte_dirty/pte_young only work if
* pte_present() is true. Undefined behaviour if not..
@@ -599,6 +649,8 @@ static inline void pmd_clear(pmd_t *pmd)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
+ if (mm->context.pgstes)
+ ptep_rcp_copy(ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm->context.noexec)
pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
@@ -667,6 +719,24 @@ static inline pte_t pte_mkyoung(pte_t pte)
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+#ifdef CONFIG_PGSTE
+ unsigned long physpage;
+ int young;
+ unsigned long *pgste;
+
+ if (!vma->vm_mm->context.pgstes)
+ return 0;
+ physpage = pte_val(*ptep) & PAGE_MASK;
+ pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+
+ young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
+ rcp_lock(ptep);
+ if (young)
+ set_bit_simple(RCP_GR_BIT, pgste);
+ young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
+ rcp_unlock(ptep);
+ return young;
+#endif
return 0;
}
@@ -674,7 +744,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
- /* No need to flush TLB; bits are in storage key */
+ /* No need to flush TLB.
+ * On s390 reference bits are in the storage key and never in the TLB.
+ * With virtualization we handle the reference bit; without it
+ * we can simply return */
+#ifdef CONFIG_PGSTE
+ return ptep_test_and_clear_young(vma, address, ptep);
+#endif
return 0;
}
@@ -693,15 +769,25 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
: "=m" (*ptep) : "m" (*ptep),
"a" (pto), "a" (address));
}
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
static inline void ptep_invalidate(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
+ if (mm->context.pgstes) {
+ rcp_lock(ptep);
+ __ptep_ipte(address, ptep);
+ ptep_rcp_copy(ptep);
+ pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ rcp_unlock(ptep);
+ return;
+ }
__ptep_ipte(address, ptep);
- if (mm->context.noexec)
+ pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ if (mm->context.noexec) {
__ptep_ipte(address, ptep + PTRS_PER_PTE);
+ pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
+ }
}
/*
@@ -966,6 +1052,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
extern int add_shared_memory(unsigned long start, unsigned long size);
extern int remove_shared_memory(unsigned long start, unsigned long size);
+extern int s390_enable_sie(void);
/*
* No page table caches to initialise
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index a76a6b8fd88..aaf4b518b94 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -62,6 +62,7 @@ extern unsigned long machine_flags;
#define MACHINE_IS_VM (machine_flags & 1)
#define MACHINE_IS_P390 (machine_flags & 4)
#define MACHINE_HAS_MVPG (machine_flags & 16)
+#define MACHINE_IS_KVM (machine_flags & 64)
#define MACHINE_HAS_IDTE (machine_flags & 128)
#define MACHINE_HAS_DIAG9C (machine_flags & 256)
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index b6ba5a60dec..d7d382f63ee 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -95,6 +95,7 @@ static inline unsigned long ffz(unsigned long word)
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 982ce8992b9..11f9d8146cd 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -34,6 +34,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h
index 9cbd9a668af..b4a46b7be79 100644
--- a/include/asm-x86/bios_ebda.h
+++ b/include/asm-x86/bios_ebda.h
@@ -1,6 +1,8 @@
#ifndef _MACH_BIOS_EBDA_H
#define _MACH_BIOS_EBDA_H
+#include <asm/io.h>
+
/*
* there is a real-mode segmented pointer pointing to the
* 4K EBDA area at 0x40E.
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 1ae7b270a1e..b81a4d4d333 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -62,12 +62,9 @@ static inline void set_bit(int nr, volatile void *addr)
*/
static inline void __set_bit(int nr, volatile void *addr)
{
- asm volatile("bts %1,%0"
- : ADDR
- : "Ir" (nr) : "memory");
+ asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
-
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
@@ -297,19 +294,145 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
static int test_bit(int nr, const volatile unsigned long *addr);
#endif
-#define test_bit(nr,addr) \
- (__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
+#define test_bit(nr, addr) \
+ (__builtin_constant_p((nr)) \
+ ? constant_test_bit((nr), (addr)) \
+ : variable_test_bit((nr), (addr)))
+
+/**
+ * __ffs - find first set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+ asm("bsf %1,%0"
+ : "=r" (word)
+ : "rm" (word));
+ return word;
+}
+
+/**
+ * ffz - find first zero bit in word
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+ asm("bsf %1,%0"
+ : "=r" (word)
+ : "r" (~word));
+ return word;
+}
+
+/**
+ * __fls - find last set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ asm("bsr %1,%0"
+ : "=r" (word)
+ : "rm" (word));
+ return word;
+}
+
+#ifdef __KERNEL__
+/**
+ * ffs - find first set bit in word
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin ffs
+ * routines, therefore differs in spirit from the other bitops.
+ *
+ * ffs(value) returns 0 if value is 0 or the position of the first
+ * set bit if value is nonzero. The first (least significant) bit
+ * is at position 1.
+ */
+static inline int ffs(int x)
+{
+ int r;
+#ifdef CONFIG_X86_CMOV
+ asm("bsfl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=r" (r) : "rm" (x), "r" (-1));
+#else
+ asm("bsfl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
+#endif
+ return r + 1;
+}
+
+/**
+ * fls - find last set bit in word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffs, but returns the position of the most significant set bit.
+ *
+ * fls(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 32.
+ */
+static inline int fls(int x)
+{
+ int r;
+#ifdef CONFIG_X86_CMOV
+ asm("bsrl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "rm" (-1));
+#else
+ asm("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
+#endif
+ return r + 1;
+}
+#endif /* __KERNEL__ */
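
Concretely: __ffs() and __fls() return 0-based bit positions and are undefined for 0, while ffs()/fls() return 1-based positions with 0 reserved for a zero argument. Worked values, illustrative only:

    /* __ffs(0x10) == 4      __fls(0x10) == 4
     * ffs(0x10)   == 5      fls(0x10)   == 5
     * ffs(0)      == 0      fls(0)      == 0
     * ffz(~0x10UL) == 4     (position of the only zero bit)
     */
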
#undef BASE_ADDR
#undef BIT_ADDR
#undef ADDR
-#ifdef CONFIG_X86_32
-# include "bitops_32.h"
-#else
-# include "bitops_64.h"
-#endif
+static inline void set_bit_string(unsigned long *bitmap,
+ unsigned long i, int len)
+{
+ unsigned long end = i + len;
+ while (i < end) {
+ __set_bit(i, bitmap);
+ i++;
+ }
+}
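
set_bit_string() is the non-atomic range setter formerly private to the 64-bit header; a hedged usage sketch:

    /* sketch: mark pages 8..11 as allocated in a small bitmap */
    DECLARE_BITMAP(map, 64);

    bitmap_zero(map, 64);
    set_bit_string(map, 8, 4);      /* sets bits 8, 9, 10 and 11 */
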
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/sched.h>
+
+#define ARCH_HAS_FAST_MULTIPLIER 1
+
+#include <asm-generic/bitops/hweight.h>
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/fls64.h>
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_bit((nr), (unsigned long *)(addr))
+
+#include <asm-generic/bitops/minix.h>
+#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
deleted file mode 100644
index 2513a81f82a..00000000000
--- a/include/asm-x86/bitops_32.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef _I386_BITOPS_H
-#define _I386_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
-{
- int d0, d1, d2;
- int res;
-
- if (!size)
- return 0;
- /* This looks at memory.
- * Mark it volatile to tell gcc not to move it around
- */
- asm volatile("movl $-1,%%eax\n\t"
- "xorl %%edx,%%edx\n\t"
- "repe; scasl\n\t"
- "je 1f\n\t"
- "xorl -4(%%edi),%%eax\n\t"
- "subl $4,%%edi\n\t"
- "bsfl %%eax,%%edx\n"
- "1:\tsubl %%ebx,%%edi\n\t"
- "shll $3,%%edi\n\t"
- "addl %%edi,%%edx"
- : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
- : "1" ((size + 31) >> 5), "2" (addr),
- "b" (addr) : "memory");
- return res;
-}
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bit number to start searching at
- * @size: The maximum size to search
- */
-int find_next_zero_bit(const unsigned long *addr, int size, int offset);
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- __asm__("bsfl %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit number of the first set bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
-{
- unsigned x = 0;
-
- while (x < size) {
- unsigned long val = *addr++;
- if (val)
- return __ffs(val) + x;
- x += sizeof(*addr) << 3;
- }
- return x;
-}
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bit number to start searching at
- * @size: The maximum size to search
- */
-int find_next_bit(const unsigned long *addr, int size, int offset);
-
-/**
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
- __asm__("bsfl %1,%0"
- :"=r" (word)
- :"r" (~word));
- return word;
-}
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/sched.h>
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz() (man ffs).
- */
-static inline int ffs(int x)
-{
- int r;
-
- __asm__("bsfl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
- return r+1;
-}
-
-/**
- * fls - find last bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs().
- */
-static inline int fls(int x)
-{
- int r;
-
- __asm__("bsrl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
- return r+1;
-}
-
-#include <asm-generic/bitops/hweight.h>
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls64.h>
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/ext2-non-atomic.h>
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr), (unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr), (unsigned long *)(addr))
-
-#include <asm-generic/bitops/minix.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _I386_BITOPS_H */
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
deleted file mode 100644
index 365f8207ea5..00000000000
--- a/include/asm-x86/bitops_64.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef _X86_64_BITOPS_H
-#define _X86_64_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
-extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
-extern long find_first_bit(const unsigned long *addr, unsigned long size);
-extern long find_next_bit(const unsigned long *addr, long size, long offset);
-
-/* return index of first bet set in val or max when no bit is set */
-static inline long __scanbit(unsigned long val, unsigned long max)
-{
- asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
- return val;
-}
-
-#define find_next_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
- find_next_bit(addr,size,off)))
-
-#define find_next_zero_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
- find_next_zero_bit(addr,size,off)))
-
-#define find_first_bit(addr, size) \
- ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
- ? (__scanbit(*(unsigned long *)(addr), (size))) \
- : find_first_bit((addr), (size))))
-
-#define find_first_zero_bit(addr, size) \
- ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
- ? (__scanbit(~*(unsigned long *)(addr), (size))) \
- : find_first_zero_bit((addr), (size))))
-
-static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
- int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __set_bit(i, bitmap);
- i++;
- }
-}
-
-/**
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
- __asm__("bsfq %1,%0"
- :"=r" (word)
- :"r" (~word));
- return word;
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- __asm__("bsfq %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
-}
-
-/*
- * __fls: find last bit set.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long __fls(unsigned long word)
-{
- __asm__("bsrq %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
-}
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/sched.h>
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static inline int ffs(int x)
-{
- int r;
-
- __asm__("bsfl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=r" (r) : "rm" (x), "r" (-1));
- return r+1;
-}
-
-/**
- * fls64 - find last bit set in 64 bit word
- * @x: the word to search
- *
- * This is defined the same way as fls.
- */
-static inline int fls64(__u64 x)
-{
- if (x == 0)
- return 0;
- return __fls(x) + 1;
-}
-
-/**
- * fls - find last bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs.
- */
-static inline int fls(int x)
-{
- int r;
-
- __asm__("bsrl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=&r" (r) : "rm" (x), "rm" (-1));
- return r+1;
-}
-
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
-#include <asm-generic/bitops/hweight.h>
-
-#endif /* __KERNEL__ */
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/ext2-non-atomic.h>
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr), (unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr), (unsigned long *)(addr))
-
-#include <asm-generic/bitops/minix.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _X86_64_BITOPS_H */
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
index 51151356840..e8659909e5f 100644
--- a/include/asm-x86/bootparam.h
+++ b/include/asm-x86/bootparam.h
@@ -9,6 +9,17 @@
#include <asm/ist.h>
#include <video/edid.h>
+/* setup data types */
+#define SETUP_NONE 0
+
+/* extensible setup data list node */
+struct setup_data {
+ u64 next;
+ u32 type;
+ u32 len;
+ u8 data[0];
+};
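
The nodes form a singly linked list of physical addresses rooted at setup_header.setup_data (added below); a hedged sketch of a consumer, assuming an early_ioremap-style mapping helper and a hypothetical per-node parser:

    /* sketch: walk the extensible setup_data list */
    u64 pa = boot_params.hdr.setup_data;

    while (pa) {
            struct setup_data *data = early_ioremap(pa, PAGE_SIZE);
            parse_one_setup_data(data);          /* assumed consumer */
            pa = data->next;
            early_iounmap(data, PAGE_SIZE);
    }
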
+
struct setup_header {
__u8 setup_sects;
__u16 root_flags;
@@ -46,6 +57,9 @@ struct setup_header {
__u32 cmdline_size;
__u32 hardware_subarch;
__u64 hardware_subarch_data;
+ __u32 payload_offset;
+ __u32 payload_length;
+ __u64 setup_data;
} __attribute__((packed));
struct sys_desc_table {
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h
index f478c57eb06..71c4d685d30 100644
--- a/include/asm-x86/e820_64.h
+++ b/include/asm-x86/e820_64.h
@@ -48,7 +48,8 @@ extern struct e820map e820;
extern void update_e820(void);
extern void reserve_early(unsigned long start, unsigned long end, char *name);
-extern void early_res_to_bootmem(void);
+extern void free_early(unsigned long start, unsigned long end);
+extern void early_res_to_bootmem(unsigned long start, unsigned long end);
#endif/*!__ASSEMBLY__*/
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index 0c9e17c73e0..d593e14f034 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -1,7 +1,7 @@
#ifndef __ASM_IO_APIC_H
#define __ASM_IO_APIC_H
-#include <asm/types.h>
+#include <linux/types.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
@@ -110,11 +110,13 @@ extern int nr_ioapic_registers[MAX_IO_APICS];
* MP-BIOS irq configuration table structures:
*/
+#define MP_MAX_IOAPIC_PIN 127
+
struct mp_ioapic_routing {
int apic_id;
int gsi_base;
int gsi_end;
- u32 pin_programmed[4];
+ DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
};
/* I/O APIC entries */
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 7a71120426a..80eefef2cc7 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -188,4 +188,45 @@ struct kvm_cpuid2 {
struct kvm_cpuid_entry2 entries[0];
};
+/* for KVM_GET_PIT and KVM_SET_PIT */
+struct kvm_pit_channel_state {
+ __u32 count; /* can be 65536 */
+ __u16 latched_count;
+ __u8 count_latched;
+ __u8 status_latched;
+ __u8 status;
+ __u8 read_state;
+ __u8 write_state;
+ __u8 write_latch;
+ __u8 rw_mode;
+ __u8 mode;
+ __u8 bcd;
+ __u8 gate;
+ __s64 count_load_time;
+};
+
+struct kvm_pit_state {
+ struct kvm_pit_channel_state channels[3];
+};
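
Userspace would read and write the in-kernel PIT state with the matching vm ioctls; a hedged sketch (vm_fd and the error handling are elided assumptions):

    /* sketch: save and restore PIT channel state across migration */
    struct kvm_pit_state pit;

    ioctl(vm_fd, KVM_GET_PIT, &pit);             /* on the source */
    /* ... transfer ... */
    ioctl(vm_fd, KVM_SET_PIT, &pit);             /* on the destination */
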
+
+#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
+#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
+#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
+#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
+#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
+#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
+#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
+#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
+#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
+#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
+#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
+#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
+#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
+#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
+#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
+#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
+#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
+#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
+#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
+
#endif
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 68ee390b284..9d963cd6533 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -20,6 +20,13 @@
#include <asm/desc.h>
+#define KVM_MAX_VCPUS 16
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+#define KVM_PIO_PAGE_OFFSET 1
+
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
@@ -39,6 +46,13 @@
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
+/* shadow tables are PAE even on non-PAE hosts */
+#define KVM_HPAGE_SHIFT 21
+#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
+#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
+
+#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
+
#define DE_VECTOR 0
#define UD_VECTOR 6
#define NM_VECTOR 7
@@ -48,6 +62,7 @@
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
+#define MC_VECTOR 18
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03
@@ -58,7 +73,8 @@
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
-#define KVM_NUM_MMU_PAGES 1024
+#define KVM_MMU_HASH_SHIFT 10
+#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
@@ -106,6 +122,12 @@ enum {
#define KVM_NR_MEM_OBJS 40
+struct kvm_guest_debug {
+ int enabled;
+ unsigned long bp[4];
+ int singlestep;
+};
+
/*
* We don't want allocation failures within the mmu code, so we preallocate
* enough memory for a single page fault in a cache.
@@ -140,6 +162,7 @@ union kvm_mmu_page_role {
unsigned pad_for_nice_hex_output:6;
unsigned metaphysical:1;
unsigned access:3;
+ unsigned invalid:1;
};
};
@@ -204,11 +227,6 @@ struct kvm_vcpu_arch {
u64 shadow_efer;
u64 apic_base;
struct kvm_lapic *apic; /* kernel irqchip context */
-#define VCPU_MP_STATE_RUNNABLE 0
-#define VCPU_MP_STATE_UNINITIALIZED 1
-#define VCPU_MP_STATE_INIT_RECEIVED 2
-#define VCPU_MP_STATE_SIPI_RECEIVED 3
-#define VCPU_MP_STATE_HALTED 4
int mp_state;
int sipi_vector;
u64 ia32_misc_enable_msr;
@@ -226,8 +244,9 @@ struct kvm_vcpu_arch {
u64 *last_pte_updated;
struct {
- gfn_t gfn; /* presumed gfn during guest pte update */
- struct page *page; /* page corresponding to that gfn */
+ gfn_t gfn; /* presumed gfn during guest pte update */
+ pfn_t pfn; /* pfn corresponding to that gfn */
+ int largepage;
} update_pte;
struct i387_fxsave_struct host_fx_image;
@@ -261,6 +280,11 @@ struct kvm_vcpu_arch {
/* emulate context */
struct x86_emulate_ctxt emulate_ctxt;
+
+ gpa_t time;
+ struct kvm_vcpu_time_info hv_clock;
+ unsigned int time_offset;
+ struct page *time_page;
};
struct kvm_mem_alias {
@@ -283,10 +307,13 @@ struct kvm_arch{
struct list_head active_mmu_pages;
struct kvm_pic *vpic;
struct kvm_ioapic *vioapic;
+ struct kvm_pit *vpit;
int round_robin_prev_vcpu;
unsigned int tss_addr;
struct page *apic_access_page;
+
+ gpa_t wall_clock;
};
struct kvm_vm_stat {
@@ -298,6 +325,7 @@ struct kvm_vm_stat {
u32 mmu_recycled;
u32 mmu_cache_miss;
u32 remote_tlb_flush;
+ u32 lpages;
};
struct kvm_vcpu_stat {
@@ -320,6 +348,7 @@ struct kvm_vcpu_stat {
u32 fpu_reload;
u32 insn_emulation;
u32 insn_emulation_fail;
+ u32 hypercalls;
};
struct descriptor_table {
@@ -355,6 +384,7 @@ struct kvm_x86_ops {
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
void (*get_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
+ int (*get_cpl)(struct kvm_vcpu *vcpu);
void (*set_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -410,6 +440,15 @@ void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const void *val, int bytes);
+int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
+ gpa_t addr, unsigned long *ret);
+
+extern bool tdp_enabled;
+
enum emulation_result {
EMULATE_DONE, /* no further processing */
EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
@@ -429,6 +468,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
unsigned long *rflags);
+void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
@@ -448,12 +488,14 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
unsigned long value);
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
-unsigned long get_cr8(struct kvm_vcpu *vcpu);
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
@@ -491,6 +533,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+void kvm_enable_tdp(void);
+
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);
@@ -600,6 +644,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
+#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
#define MSR_IA32_TIME_STAMP_COUNTER 0x010
@@ -610,4 +655,30 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
#define RMODE_TSS_SIZE \
(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
+enum {
+ TASK_SWITCH_CALL = 0,
+ TASK_SWITCH_IRET = 1,
+ TASK_SWITCH_JMP = 2,
+ TASK_SWITCH_GATE = 3,
+};
+
+#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 5, d1, d2, d3, d4, d5)
+#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 4, d1, d2, d3, d4, 0)
+#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 3, d1, d2, d3, 0, 0)
+#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 2, d1, d2, 0, 0, 0)
+#define KVMTRACE_1D(evt, vcpu, d1, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 1, d1, 0, 0, 0, 0)
+#define KVMTRACE_0D(evt, vcpu, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 0, 0, 0, 0, 0, 0)
+
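
Each macro records the KVM_TRC_* event code plus N data words through a static marker; a hedged example of the intended call style, with the vector payload as an illustrative placeholder:

    /* sketch: trace a virtual interrupt injection with one data word */
    KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)vector, handler);
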
#endif
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index c6f3fd8d8c5..50984594207 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -10,10 +10,65 @@
* paravirtualization, the appropriate feature bit should be checked.
*/
#define KVM_CPUID_FEATURES 0x40000001
+#define KVM_FEATURE_CLOCKSOURCE 0
+#define KVM_FEATURE_NOP_IO_DELAY 1
+#define KVM_FEATURE_MMU_OP 2
+
+#define MSR_KVM_WALL_CLOCK 0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+#define KVM_MAX_MMU_OP_BATCH 32
+
+/* Operations for KVM_HC_MMU_OP */
+#define KVM_MMU_OP_WRITE_PTE 1
+#define KVM_MMU_OP_FLUSH_TLB 2
+#define KVM_MMU_OP_RELEASE_PT 3
+
+/* Payload for KVM_HC_MMU_OP */
+struct kvm_mmu_op_header {
+ __u32 op;
+ __u32 pad;
+};
+
+struct kvm_mmu_op_write_pte {
+ struct kvm_mmu_op_header header;
+ __u64 pte_phys;
+ __u64 pte_val;
+};
+
+struct kvm_mmu_op_flush_tlb {
+ struct kvm_mmu_op_header header;
+};
+
+struct kvm_mmu_op_release_pt {
+ struct kvm_mmu_op_header header;
+ __u64 pt_phys;
+};
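
A guest batches these records into a buffer and hands it to the host through the MMU-op hypercall; a hedged sketch, assuming a generic kvm_hypercall2() wrapper and a KVM_HC_MMU_OP number defined in the shared paravirt headers:

    /* sketch: submit one write_pte operation via the MMU-op hypercall */
    struct kvm_mmu_op_write_pte wpte = {
            .header.op = KVM_MMU_OP_WRITE_PTE,
            .pte_phys  = pte_phys,
            .pte_val   = pte_val,
    };

    kvm_hypercall2(KVM_HC_MMU_OP, sizeof(wpte), __pa(&wpte));
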
#ifdef __KERNEL__
#include <asm/processor.h>
+/* Xen binary-compatible interface. See Xen headers for details. */
+struct kvm_vcpu_time_info {
+ uint32_t version;
+ uint32_t pad0;
+ uint64_t tsc_timestamp;
+ uint64_t system_time;
+ uint32_t tsc_to_system_mul;
+ int8_t tsc_shift;
+ int8_t pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct kvm_wall_clock {
+ uint32_t wc_version;
+ uint32_t wc_sec;
+ uint32_t wc_nsec;
+} __attribute__((__packed__));
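
Per the Xen clock layout, guest time is reconstructed from the TSC delta, scaled by tsc_to_system_mul and tsc_shift; a hedged sketch of the computation (the multiply is kept to 64 bits here, and a real reader must also retry while the version field is odd or changes underneath it):

    /* sketch: nanoseconds of system time from a kvm_vcpu_time_info */
    static u64 pv_clock_read(const struct kvm_vcpu_time_info *ti)
    {
            u64 delta = native_read_tsc() - ti->tsc_timestamp;

            if (ti->tsc_shift >= 0)
                    delta <<= ti->tsc_shift;
            else
                    delta >>= -ti->tsc_shift;
            return ti->system_time +
                    ((delta * ti->tsc_to_system_mul) >> 32);
    }
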
+
+extern void kvmclock_init(void);
+
/* This instruction is vmcall. On non-VT architectures, it will generate a
* trap that we will then rewrite to the appropriate instruction.
*/
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 3ff2c5bff93..56d0e1fa025 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -33,7 +33,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
*((volatile long *) phys_to_virt(0x467)) = 0;
}
-static inline void smpboot_setup_io_apic(void)
+static inline void __init smpboot_setup_io_apic(void)
{
/*
* Here we can be sure that there is an IO-APIC in the system. Let's
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 168b6447cf1..577ab79c4c2 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -198,16 +198,16 @@ do { \
*/
#define update_mmu_cache(vma, address, pte) do { } while (0)
-void native_pagetable_setup_start(pgd_t *base);
-void native_pagetable_setup_done(pgd_t *base);
+extern void native_pagetable_setup_start(pgd_t *base);
+extern void native_pagetable_setup_done(pgd_t *base);
#ifndef CONFIG_PARAVIRT
-static inline void paravirt_pagetable_setup_start(pgd_t *base)
+static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
native_pagetable_setup_start(base);
}
-static inline void paravirt_pagetable_setup_done(pgd_t *base)
+static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
native_pagetable_setup_done(base);
}
diff --git a/include/asm-x86/posix_types.h b/include/asm-x86/posix_types.h
index fe312a5ba20..bb7133dc155 100644
--- a/include/asm-x86/posix_types.h
+++ b/include/asm-x86/posix_types.h
@@ -1,5 +1,11 @@
#ifdef __KERNEL__
-# if defined(CONFIG_X86_32) || defined(__i386__)
+# ifdef CONFIG_X86_32
+# include "posix_types_32.h"
+# else
+# include "posix_types_64.h"
+# endif
+#else
+# ifdef __i386__
# include "posix_types_32.h"
# else
# include "posix_types_64.h"
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index e6bf92ddeb2..2e7974ec77e 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -118,7 +118,6 @@ struct cpuinfo_x86 {
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
-#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
@@ -723,6 +722,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
+ trace_hardirqs_on();
/* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 24ec061566c..9f922b0b95d 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -231,6 +231,8 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
+#define __ARCH_WANT_COMPAT_SYS_PTRACE
+
#endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index 6b5233b4f84..e63741f1939 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -15,5 +15,7 @@ struct machine_ops {
extern struct machine_ops machine_ops;
void machine_real_restart(unsigned char *code, int length);
+void native_machine_crash_shutdown(struct pt_regs *regs);
+void native_machine_shutdown(void);
#endif /* _ASM_REBOOT_H */
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h
index 3451c576e6a..c9448bd8968 100644
--- a/include/asm-x86/rio.h
+++ b/include/asm-x86/rio.h
@@ -60,15 +60,4 @@ enum {
ALT_CALGARY = 5, /* Second Planar Calgary */
};
-/*
- * there is a real-mode segmented pointer pointing to the
- * 4K EBDA area at 0x40E.
- */
-static inline unsigned long get_bios_ebda(void)
-{
- unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL);
- address <<= 4;
- return address;
-}
-
#endif /* __ASM_RIO_H */
diff --git a/include/asm-x86/unistd.h b/include/asm-x86/unistd.h
index effc7ad8e12..2a58ed3e51d 100644
--- a/include/asm-x86/unistd.h
+++ b/include/asm-x86/unistd.h
@@ -1,5 +1,11 @@
#ifdef __KERNEL__
-# if defined(CONFIG_X86_32) || defined(__i386__)
+# ifdef CONFIG_X86_32
+# include "unistd_32.h"
+# else
+# include "unistd_64.h"
+# endif
+#else
+# ifdef __i386__
# include "unistd_32.h"
# else
# include "unistd_64.h"
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index cbb5ccb27de..bda6f04791d 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -210,7 +210,6 @@ unifdef-y += hayesesp.h
unifdef-y += hdlcdrv.h
unifdef-y += hdlc.h
unifdef-y += hdreg.h
-unifdef-y += hdsmart.h
unifdef-y += hid.h
unifdef-y += hiddev.h
unifdef-y += hidraw.h
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 40d54731de7..48bde600a2d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -112,4 +112,144 @@ static inline unsigned fls_long(unsigned long l)
return fls64(l);
}
+#ifdef __KERNEL__
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+extern unsigned long __find_first_bit(const unsigned long *addr,
+ unsigned long size);
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit.
+ */
+static __always_inline unsigned long
+find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ /* Avoid a function call if the bitmap size is a constant */
+ /* and not bigger than BITS_PER_LONG. */
+
+ /* insert a sentinel so that __ffs returns size if there */
+ /* are no set bits in the bitmap */
+ if (__builtin_constant_p(size) && (size < BITS_PER_LONG))
+ return __ffs((*addr) | (1ul << size));
+
+ /* the result of __ffs(0) is undefined, so it needs to be */
+ /* handled separately */
+ if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
+ return ((*addr) == 0) ? BITS_PER_LONG : __ffs(*addr);
+
+ /* size is not constant or too big */
+ return __find_first_bit(addr, size);
+}
+
+extern unsigned long __find_first_zero_bit(const unsigned long *addr,
+ unsigned long size);
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first cleared bit.
+ */
+static __always_inline unsigned long
+find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ /* Avoid a function call if the bitmap size is a constant */
+ /* and not bigger than BITS_PER_LONG. */
+
+ /* insert a sentinel so that __ffs returns size if there */
+ /* are no cleared bits in the bitmap */
+ if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
+ return __ffs(~(*addr) | (1ul << size));
+ }
+
+ /* the result of __ffs(0) is undefined, so it needs to be */
+ /* handled separately */
+ if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
+ return (~(*addr) == 0) ? BITS_PER_LONG : __ffs(~(*addr));
+
+ /* size is not constant or too big */
+ return __find_first_zero_bit(addr, size);
+}
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
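
The sentinel trick deserves a worked case: for a constant size of 4 and an all-zero word, __ffs((*addr) | (1ul << 4)) sees only the sentinel at bit 4 and returns 4, i.e. exactly size, which is the "not found" convention. Illustrative values only:

    /* worked example: constant-size fast path of find_first_bit() */
    unsigned long word = 0x0;       /* no bits set */
    /* __ffs(word | (1ul << 4)) == 4 == size -> not found */
    unsigned long word2 = 0x6;      /* bits 1 and 2 set */
    /* __ffs(word2 | (1ul << 4)) == 1 -> first set bit */
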
+
+#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
+extern unsigned long __find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
+static __always_inline unsigned long
+find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ unsigned long value;
+
+ /* Avoid a function call if the bitmap size is a constant */
+ /* and not bigger than BITS_PER_LONG. */
+
+ /* insert a sentinel so that __ffs returns size if there */
+ /* are no set bits in the bitmap */
+ if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
+ value = (*addr) & ((~0ul) << offset);
+ value |= (1ul << size);
+ return __ffs(value);
+ }
+
+ /* the result of __ffs(0) is undefined, so it needs to be */
+ /* handled separately */
+ if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
+ value = (*addr) & ((~0ul) << offset);
+ return (value == 0) ? BITS_PER_LONG : __ffs(value);
+ }
+
+ /* size is not constant or too big */
+ return __find_next_bit(addr, size, offset);
+}
+
+extern unsigned long __find_next_zero_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
+static __always_inline unsigned long
+find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ unsigned long value;
+
+ /* Avoid a function call if the bitmap size is a constant */
+ /* and not bigger than BITS_PER_LONG. */
+
+ /* insert a sentinel so that __ffs returns size if there */
+ /* are no cleared bits in the bitmap */
+ if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
+ value = (~(*addr)) & ((~0ul) << offset);
+ value |= (1ul << size);
+ return __ffs(value);
+ }
+
+ /* the result of __ffs(0) is undefined, so it needs to be */
+ /* handled separately */
+ if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
+ value = (~(*addr)) & ((~0ul) << offset);
+ return (value == 0) ? BITS_PER_LONG : __ffs(value);
+ }
+
+ /* size is not constant or too big */
+ return __find_next_zero_bit(addr, size, offset);
+}
+#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
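
Together the two pairs support the usual bitmap scan idiom; a hedged sketch with an assumed per-bit handler:

    /* sketch: visit every set bit in a bitmap of nbits bits */
    for (bit = find_first_bit(map, nbits);
         bit < nbits;
         bit = find_next_bit(map, nbits, bit + 1))
            handle_bit(bit);        /* assumed per-bit handler */
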
+#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index e8406c55c6d..cf0303a6061 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -56,19 +56,25 @@ struct sg_io_v4 {
#if defined(CONFIG_BLK_DEV_BSG)
struct bsg_class_device {
struct device *class_dev;
- struct device *dev;
+ struct device *parent;
int minor;
struct request_queue *queue;
+ struct kref ref;
+ void (*release)(struct device *);
};
-extern int bsg_register_queue(struct request_queue *, struct device *, const char *);
+extern int bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ void (*release)(struct device *));
extern void bsg_unregister_queue(struct request_queue *);
#else
-static inline int bsg_register_queue(struct request_queue * rq, struct device *dev, const char *name)
+static inline int bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ void (*release)(struct device *))
{
return 0;
}
-static inline void bsg_unregister_queue(struct request_queue *rq)
+static inline void bsg_unregister_queue(struct request_queue *q)
{
}
#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index fe23792f05c..b2fd7547b58 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -28,9 +28,16 @@
#define __must_be_array(a) \
BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0])))
-#define inline inline __attribute__((always_inline))
-#define __inline__ __inline__ __attribute__((always_inline))
-#define __inline __inline __attribute__((always_inline))
+/*
+ * Force always-inline if the user requests it via the .config:
+ */
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+ !defined(CONFIG_OPTIMIZE_INLINING) && (__GNUC__ >= 4)
+# define inline inline __attribute__((always_inline))
+# define __inline__ __inline__ __attribute__((always_inline))
+# define __inline __inline __attribute__((always_inline))
+#endif
+
#define __deprecated __attribute__((deprecated))
#define __packed __attribute__((packed))
#define __weak __attribute__((weak))
diff --git a/include/linux/file.h b/include/linux/file.h
index 653477021e4..69baf5a4f0a 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -117,7 +117,8 @@ struct task_struct;
struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
-void reset_files_struct(struct task_struct *, struct files_struct *);
+void reset_files_struct(struct files_struct *);
+int unshare_files(struct files_struct **);
extern struct kmem_cache *files_cachep;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6556f2f967e..d6d7c52055c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1309,7 +1309,7 @@ struct super_operations {
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*clear_inode) (struct inode *);
- void (*umount_begin) (struct vfsmount *, int);
+ void (*umount_begin) (struct super_block *);
int (*show_options)(struct seq_file *, struct vfsmount *);
int (*show_stats)(struct seq_file *, struct vfsmount *);
@@ -2034,9 +2034,6 @@ static inline ino_t parent_ino(struct dentry *dentry)
return res;
}
-/* kernel/fork.c */
-extern int unshare_files(void);
-
/* Transaction based IO helpers */
/*
diff --git a/include/linux/hdsmart.h b/include/linux/hdsmart.h
deleted file mode 100644
index 4f4faf9d423..00000000000
--- a/include/linux/hdsmart.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * linux/include/linux/hdsmart.h
- *
- * Copyright (C) 1999-2000 Michael Cornwell <cornwell@acm.org>
- * Copyright (C) 2000 Andre Hedrick <andre@linux-ide.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _LINUX_HDSMART_H
-#define _LINUX_HDSMART_H
-
-#ifndef __KERNEL__
-#define OFFLINE_FULL_SCAN 0
-#define SHORT_SELF_TEST 1
-#define EXTEND_SELF_TEST 2
-#define SHORT_CAPTIVE_SELF_TEST 129
-#define EXTEND_CAPTIVE_SELF_TEST 130
-
-/* smart_attribute is the vendor specific in SFF-8035 spec */
-typedef struct ata_smart_attribute_s {
- unsigned char id;
- unsigned short status_flag;
- unsigned char normalized;
- unsigned char worse_normal;
- unsigned char raw[6];
- unsigned char reserv;
-} __attribute__ ((packed)) ata_smart_attribute_t;
-
-/* smart_values is format of the read drive Atrribute command */
-typedef struct ata_smart_values_s {
- unsigned short revnumber;
- ata_smart_attribute_t vendor_attributes [30];
- unsigned char offline_data_collection_status;
- unsigned char self_test_exec_status;
- unsigned short total_time_to_complete_off_line;
- unsigned char vendor_specific_366;
- unsigned char offline_data_collection_capability;
- unsigned short smart_capability;
- unsigned char errorlog_capability;
- unsigned char vendor_specific_371;
- unsigned char short_test_completion_time;
- unsigned char extend_test_completion_time;
- unsigned char reserved_374_385 [12];
- unsigned char vendor_specific_386_509 [125];
- unsigned char chksum;
-} __attribute__ ((packed)) ata_smart_values_t;
-
-/* Smart Threshold data structures */
-/* Vendor attribute of SMART Threshold */
-typedef struct ata_smart_threshold_entry_s {
- unsigned char id;
- unsigned char normalized_threshold;
- unsigned char reserved[10];
-} __attribute__ ((packed)) ata_smart_threshold_entry_t;
-
-/* Format of Read SMART THreshold Command */
-typedef struct ata_smart_thresholds_s {
- unsigned short revnumber;
- ata_smart_threshold_entry_t thres_entries[30];
- unsigned char reserved[149];
- unsigned char chksum;
-} __attribute__ ((packed)) ata_smart_thresholds_t;
-
-typedef struct ata_smart_errorlog_command_struct_s {
- unsigned char devicecontrolreg;
- unsigned char featuresreg;
- unsigned char sector_count;
- unsigned char sector_number;
- unsigned char cylinder_low;
- unsigned char cylinder_high;
- unsigned char drive_head;
- unsigned char commandreg;
- unsigned int timestamp;
-} __attribute__ ((packed)) ata_smart_errorlog_command_struct_t;
-
-typedef struct ata_smart_errorlog_error_struct_s {
- unsigned char error_condition;
- unsigned char extended_error[14];
- unsigned char state;
- unsigned short timestamp;
-} __attribute__ ((packed)) ata_smart_errorlog_error_struct_t;
-
-typedef struct ata_smart_errorlog_struct_s {
- ata_smart_errorlog_command_struct_t commands[6];
- ata_smart_errorlog_error_struct_t error_struct;
-} __attribute__ ((packed)) ata_smart_errorlog_struct_t;
-
-typedef struct ata_smart_errorlog_s {
- unsigned char revnumber;
- unsigned char error_log_pointer;
- ata_smart_errorlog_struct_t errorlog_struct[5];
- unsigned short ata_error_count;
- unsigned short non_fatal_count;
- unsigned short drive_timeout_count;
- unsigned char reserved[53];
- unsigned char chksum;
-} __attribute__ ((packed)) ata_smart_errorlog_t;
-
-typedef struct ata_smart_selftestlog_struct_s {
- unsigned char selftestnumber;
- unsigned char selfteststatus;
- unsigned short timestamp;
- unsigned char selftestfailurecheckpoint;
- unsigned int lbafirstfailure;
- unsigned char vendorspecific[15];
-} __attribute__ ((packed)) ata_smart_selftestlog_struct_t;
-
-typedef struct ata_smart_selftestlog_s {
- unsigned short revnumber;
- ata_smart_selftestlog_struct_t selftest_struct[21];
- unsigned char vendorspecific[2];
- unsigned char mostrecenttest;
- unsigned char resevered[2];
- unsigned char chksum;
-} __attribute__ ((packed)) ata_smart_selftestlog_t;
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_HDSMART_H */
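hdsmart.h only defined SMART data structures for userspace (its whole body sits under #ifndef __KERNEL__), so nothing in the kernel loses a definition here; tools are expected to issue the SMART commands themselves. A minimal hedged userspace sketch, assuming the HDIO_DRIVE_CMD ioctl and the WIN_SMART/SMART_READ_VALUES constants from <linux/hdreg.h> (the device path is purely illustrative):

/* Hedged sketch: read the SMART values page without hdsmart.h. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	/* args[0]=command, args[1]=nsector, args[2]=feature,
	 * args[3]=sector count, then one 512-byte data sector */
	unsigned char args[4 + 512];
	int fd = open("/dev/hda", O_RDONLY | O_NONBLOCK); /* illustrative */

	if (fd < 0)
		return 1;
	memset(args, 0, sizeof(args));
	args[0] = WIN_SMART;		/* 0xb0 */
	args[2] = SMART_READ_VALUES;	/* feature 0xd0 */
	args[3] = 1;			/* one sector of data back */
	if (ioctl(fd, HDIO_DRIVE_CMD, args) == 0)
		printf("SMART data revision %u\n", args[4] | (args[5] << 8));
	return 0;
}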
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 5f3e82ae901..32fd77bb443 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -48,13 +48,6 @@ typedef unsigned char byte; /* used everywhere */
#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
/*
- * Tune flags
- */
-#define IDE_TUNE_NOAUTO 2
-#define IDE_TUNE_AUTO 1
-#define IDE_TUNE_DEFAULT 0
-
-/*
* state flags
*/
@@ -68,23 +61,30 @@ typedef unsigned char byte; /* used everywhere */
*/
#define IDE_NR_PORTS (10)
-#define IDE_DATA_OFFSET (0)
-#define IDE_ERROR_OFFSET (1)
-#define IDE_NSECTOR_OFFSET (2)
-#define IDE_SECTOR_OFFSET (3)
-#define IDE_LCYL_OFFSET (4)
-#define IDE_HCYL_OFFSET (5)
-#define IDE_SELECT_OFFSET (6)
-#define IDE_STATUS_OFFSET (7)
-#define IDE_CONTROL_OFFSET (8)
-#define IDE_IRQ_OFFSET (9)
-
-#define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET
-#define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET
-#define IDE_ALTSTATUS_OFFSET IDE_CONTROL_OFFSET
-#define IDE_IREASON_OFFSET IDE_NSECTOR_OFFSET
-#define IDE_BCOUNTL_OFFSET IDE_LCYL_OFFSET
-#define IDE_BCOUNTH_OFFSET IDE_HCYL_OFFSET
+struct ide_io_ports {
+ unsigned long data_addr;
+
+ union {
+ unsigned long error_addr; /* read: error */
+ unsigned long feature_addr; /* write: feature */
+ };
+
+ unsigned long nsect_addr;
+ unsigned long lbal_addr;
+ unsigned long lbam_addr;
+ unsigned long lbah_addr;
+
+ unsigned long device_addr;
+
+ union {
+ unsigned long status_addr; /*  read: status  */
+ unsigned long command_addr; /* write: command */
+ };
+
+ unsigned long ctl_addr;
+
+ unsigned long irq_addr;
+};
#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
#define BAD_R_STAT (BUSY_STAT | ERR_STAT)
@@ -163,14 +163,17 @@ typedef u8 hwif_chipset_t;
* Structure to hold all information about the location of this port
*/
typedef struct hw_regs_s {
- unsigned long io_ports[IDE_NR_PORTS]; /* task file registers */
+ union {
+ struct ide_io_ports io_ports;
+ unsigned long io_ports_array[IDE_NR_PORTS];
+ };
+
int irq; /* our irq number */
ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
hwif_chipset_t chipset;
struct device *dev;
} hw_regs_t;
-struct hwif_s * ide_find_port(unsigned long);
void ide_init_port_data(struct hwif_s *, unsigned int);
void ide_init_port_hw(struct hwif_s *, hw_regs_t *);
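The anonymous union above makes the named struct ide_io_ports view and the flat io_ports_array view alias the same storage, so code that still indexes task-file registers by number keeps working while new code can use names. A hedged kernel-context sketch of that aliasing, which relies on struct ide_io_ports being ten back-to-back unsigned longs with no padding:

/* Hedged sketch only; values are demo numbers, not real port setup. */
static void io_ports_alias_demo(void)
{
	hw_regs_t hw = { };
	int i;

	BUILD_BUG_ON(sizeof(struct ide_io_ports) !=
		     IDE_NR_PORTS * sizeof(unsigned long));

	for (i = 0; i < IDE_NR_PORTS; i++)
		hw.io_ports_array[i] = 0x1f0 + i;

	/* hw.io_ports.data_addr   == 0x1f0 (index 0)
	 * hw.io_ports.status_addr == 0x1f7 (index 7), same storage */
}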
@@ -180,10 +183,10 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
{
unsigned int i;
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
- hw->io_ports[i] = io_addr++;
+ for (i = 0; i <= 7; i++)
+ hw->io_ports_array[i] = io_addr++;
- hw->io_ports[IDE_CONTROL_OFFSET] = ctl_addr;
+ hw->io_ports.ctl_addr = ctl_addr;
}
#include <asm/ide.h>
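For a legacy primary channel, the helper now fills the eight task-file addresses in order and places the control register by name rather than through an offset constant. A short usage sketch; the addresses and IRQ are the traditional ISA values, shown for illustration:

/* Hedged sketch: classic primary-channel setup. */
static void setup_primary_channel(hw_regs_t *hw)
{
	memset(hw, 0, sizeof(*hw));
	ide_std_init_ports(hw, 0x1f0, 0x3f6);
	hw->irq = 14;
	/* now: hw->io_ports.data_addr == 0x1f0 ... status_addr == 0x1f7,
	 *      hw->io_ports.ctl_addr  == 0x3f6 */
}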
@@ -329,7 +332,6 @@ typedef struct ide_drive_s {
unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */
unsigned doorlocking : 1; /* for removable only: door lock/unlock works */
unsigned nodma : 1; /* disallow DMA */
- unsigned autotune : 2; /* 0=default, 1=autotune, 2=noautotune */
unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */
 unsigned blocked : 1; /* 1=power management told us not to do anything, so sleep nicely */
unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */
@@ -388,6 +390,43 @@ typedef struct ide_drive_s {
struct ide_port_info;
+struct ide_port_ops {
+ /* host specific initialization of devices on a port */
+ void (*port_init_devs)(struct hwif_s *);
+ /* routine to program host for PIO mode */
+ void (*set_pio_mode)(ide_drive_t *, const u8);
+ /* routine to program host for DMA mode */
+ void (*set_dma_mode)(ide_drive_t *, const u8);
+ /* tweaks hardware to select drive */
+ void (*selectproc)(ide_drive_t *);
+ /* chipset polling based on hba specifics */
+ int (*reset_poll)(ide_drive_t *);
+ /* chipset specific changes to default for device-hba resets */
+ void (*pre_reset)(ide_drive_t *);
+ /* routine to reset controller after a disk reset */
+ void (*resetproc)(ide_drive_t *);
+ /* special host masking for drive selection */
+ void (*maskproc)(ide_drive_t *, int);
+ /* check host's drive quirk list */
+ void (*quirkproc)(ide_drive_t *);
+
+ u8 (*mdma_filter)(ide_drive_t *);
+ u8 (*udma_filter)(ide_drive_t *);
+
+ u8 (*cable_detect)(struct hwif_s *);
+};
+
+struct ide_dma_ops {
+ void (*dma_host_set)(struct ide_drive_s *, int);
+ int (*dma_setup)(struct ide_drive_s *);
+ void (*dma_exec_cmd)(struct ide_drive_s *, u8);
+ void (*dma_start)(struct ide_drive_s *);
+ int (*dma_end)(struct ide_drive_s *);
+ int (*dma_test_irq)(struct ide_drive_s *);
+ void (*dma_lost_irq)(struct ide_drive_s *);
+ void (*dma_timeout)(struct ide_drive_s *);
+};
+
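Hunks further down move these hooks out of hwif_s itself, so a host driver now supplies one shared, const ops table instead of patching function pointers into every hwif at init time. A hedged sketch for a hypothetical driver; the two ops structs and their field names are from this patch, every foo_* symbol is invented:

/* Hedged sketch of the ops-table style. */
static void foo_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
	/* program the controller's PIO timing registers for 'pio' */
}

static u8 foo_cable_detect(struct hwif_s *hwif)
{
	return ATA_CBL_PATA40;	/* pretend: 40-wire cable detected */
}

static const struct ide_port_ops foo_port_ops = {
	.set_pio_mode	= foo_set_pio_mode,
	.cable_detect	= foo_cable_detect,
};
/* wired up through struct ide_port_info (see the hunk below that adds
 * .port_ops / .dma_ops there). */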
typedef struct hwif_s {
struct hwif_s *next; /* for linked-list in ide_hwgroup_t */
struct hwif_s *mate; /* other hwif from same PCI chip */
@@ -396,8 +435,8 @@ typedef struct hwif_s {
char name[6]; /* name of interface, eg. "ide0" */
- /* task file registers for pata and sata */
- unsigned long io_ports[IDE_NR_PORTS];
+ struct ide_io_ports io_ports;
+
unsigned long sata_scr[SATA_NR_PORTS];
ide_drive_t drives[MAX_DRIVES]; /* drive info */
@@ -421,38 +460,12 @@ typedef struct hwif_s {
struct device *dev;
- const struct ide_port_info *cds; /* chipset device struct */
-
ide_ack_intr_t *ack_intr;
void (*rw_disk)(ide_drive_t *, struct request *);
-#if 0
- ide_hwif_ops_t *hwifops;
-#else
- /* host specific initialization of devices on a port */
- void (*port_init_devs)(struct hwif_s *);
- /* routine to program host for PIO mode */
- void (*set_pio_mode)(ide_drive_t *, const u8);
- /* routine to program host for DMA mode */
- void (*set_dma_mode)(ide_drive_t *, const u8);
- /* tweaks hardware to select drive */
- void (*selectproc)(ide_drive_t *);
- /* chipset polling based on hba specifics */
- int (*reset_poll)(ide_drive_t *);
- /* chipset specific changes to default for device-hba resets */
- void (*pre_reset)(ide_drive_t *);
- /* routine to reset controller after a disk reset */
- void (*resetproc)(ide_drive_t *);
- /* special host masking for drive selection */
- void (*maskproc)(ide_drive_t *, int);
- /* check host's drive quirk list */
- void (*quirkproc)(ide_drive_t *);
-#endif
- u8 (*mdma_filter)(ide_drive_t *);
- u8 (*udma_filter)(ide_drive_t *);
-
- u8 (*cable_detect)(struct hwif_s *);
+ const struct ide_port_ops *port_ops;
+ const struct ide_dma_ops *dma_ops;
void (*ata_input_data)(ide_drive_t *, void *, u32);
void (*ata_output_data)(ide_drive_t *, void *, u32);
@@ -460,15 +473,7 @@ typedef struct hwif_s {
void (*atapi_input_bytes)(ide_drive_t *, void *, u32);
void (*atapi_output_bytes)(ide_drive_t *, void *, u32);
- void (*dma_host_set)(ide_drive_t *, int);
- int (*dma_setup)(ide_drive_t *);
- void (*dma_exec_cmd)(ide_drive_t *, u8);
- void (*dma_start)(ide_drive_t *);
- int (*ide_dma_end)(ide_drive_t *drive);
- int (*ide_dma_test_irq)(ide_drive_t *drive);
void (*ide_dma_clear_irq)(ide_drive_t *drive);
- void (*dma_lost_irq)(ide_drive_t *drive);
- void (*dma_timeout)(ide_drive_t *drive);
void (*OUTB)(u8 addr, unsigned long port);
void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
@@ -515,14 +520,11 @@ typedef struct hwif_s {
unsigned long extra_base; /* extra addr for dma ports */
unsigned extra_ports; /* number of extra dma ports */
- unsigned noprobe : 1; /* don't probe for this interface */
unsigned present : 1; /* this interface exists */
unsigned serialized : 1; /* serialized all channel operation */
unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
- unsigned reset : 1; /* reset after probe */
unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
unsigned mmio : 1; /* host uses MMIO */
- unsigned straight8 : 1; /* Alan's straight 8 check */
struct device gendev;
struct device *portdev;
@@ -703,10 +705,6 @@ void ide_add_generic_settings(ide_drive_t *);
read_proc_t proc_ide_read_capacity;
read_proc_t proc_ide_read_geometry;
-#ifdef CONFIG_BLK_DEV_IDEPCI
-void ide_pci_create_host_proc(const char *, get_info_t *);
-#endif
-
/*
* Standard exit stuff:
*/
@@ -807,8 +805,21 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig
#ifndef _IDE_C
extern ide_hwif_t ide_hwifs[]; /* master data repository */
#endif
+extern int ide_noacpi;
+extern int ide_acpigtf;
+extern int ide_acpionboot;
extern int noautodma;
+extern int ide_vlb_clk;
+extern int ide_pci_clk;
+
+ide_hwif_t *ide_find_port_slot(const struct ide_port_info *);
+
+static inline ide_hwif_t *ide_find_port(void)
+{
+ return ide_find_port_slot(NULL);
+}
+
extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
int uptodate, int nr_sectors);
@@ -1004,10 +1015,15 @@ void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8
void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-void ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
+int ide_pci_set_master(struct pci_dev *, const char *);
+unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
+int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
#else
-static inline void ide_hwif_setup_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d) { }
+static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
+ const struct ide_port_info *d)
+{
+ return -EINVAL;
+}
#endif
extern void default_hwif_iops(ide_hwif_t *);
@@ -1027,8 +1043,8 @@ enum {
IDE_HFLAG_SINGLE = (1 << 1),
/* don't use legacy PIO blacklist */
IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2),
- /* don't use conservative PIO "downgrade" */
- IDE_HFLAG_PIO_NO_DOWNGRADE = (1 << 3),
+ /* set for the second port of QD65xx */
+ IDE_HFLAG_QD_2ND_PORT = (1 << 3),
/* use PIO8/9 for prefetch off/on */
IDE_HFLAG_ABUSE_PREFETCH = (1 << 4),
/* use PIO6/7 for fast-devsel off/on */
@@ -1050,14 +1066,12 @@ enum {
IDE_HFLAG_VDMA = (1 << 11),
/* ATAPI DMA is unsupported */
IDE_HFLAG_NO_ATAPI_DMA = (1 << 12),
- /* set if host is a "bootable" controller */
- IDE_HFLAG_BOOTABLE = (1 << 13),
+ /* set if host is a "non-bootable" controller */
+ IDE_HFLAG_NON_BOOTABLE = (1 << 13),
/* host doesn't support DMA */
IDE_HFLAG_NO_DMA = (1 << 14),
/* check if host is PCI IDE device before allowing DMA */
IDE_HFLAG_NO_AUTODMA = (1 << 15),
- /* don't autotune PIO */
- IDE_HFLAG_NO_AUTOTUNE = (1 << 16),
/* host is CS5510/CS5520 */
IDE_HFLAG_CS5520 = IDE_HFLAG_VDMA,
/* no LBA48 */
@@ -1079,8 +1093,8 @@ enum {
/* unmask IRQs */
IDE_HFLAG_UNMASK_IRQS = (1 << 25),
IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26),
- /* host is CY82C693 */
- IDE_HFLAG_CY82C693 = (1 << 27),
+ /* serialize ports if DMA is possible (for sl82c105) */
+ IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
/* force host out of "simplex" mode */
IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28),
/* DSC overlap is unsupported */
@@ -1092,9 +1106,9 @@ enum {
};
#ifdef CONFIG_BLK_DEV_OFFBOARD
-# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_BOOTABLE
-#else
# define IDE_HFLAG_OFF_BOARD 0
+#else
+# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
#endif
struct ide_port_info {
@@ -1102,10 +1116,14 @@ struct ide_port_info {
unsigned int (*init_chipset)(struct pci_dev *, const char *);
void (*init_iops)(ide_hwif_t *);
void (*init_hwif)(ide_hwif_t *);
- void (*init_dma)(ide_hwif_t *, unsigned long);
+ int (*init_dma)(ide_hwif_t *,
+ const struct ide_port_info *);
+
+ const struct ide_port_ops *port_ops;
+ const struct ide_dma_ops *dma_ops;
+
ide_pci_enablebit_t enablebits[2];
hwif_chipset_t chipset;
- u8 extra;
u32 host_flags;
u8 pio_mask;
u8 swdma_mask;
@@ -1152,13 +1170,16 @@ void ide_destroy_dmatable(ide_drive_t *);
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
extern int ide_build_dmatable(ide_drive_t *, struct request *);
-extern int ide_release_dma(ide_hwif_t *);
-extern void ide_setup_dma(ide_hwif_t *, unsigned long);
+int ide_allocate_dma_engine(ide_hwif_t *);
+void ide_release_dma_engine(ide_hwif_t *);
+void ide_setup_dma(ide_hwif_t *, unsigned long);
void ide_dma_host_set(ide_drive_t *, int);
extern int ide_dma_setup(ide_drive_t *);
+void ide_dma_exec_cmd(ide_drive_t *, u8);
extern void ide_dma_start(ide_drive_t *);
extern int __ide_dma_end(ide_drive_t *);
+int ide_dma_test_irq(ide_drive_t *);
extern void ide_dma_lost_irq(ide_drive_t *);
extern void ide_dma_timeout(ide_drive_t *);
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
@@ -1176,7 +1197,7 @@ static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
#endif /* CONFIG_BLK_DEV_IDEDMA */
#ifndef CONFIG_BLK_DEV_IDEDMA_SFF
-static inline void ide_release_dma(ide_hwif_t *drive) {;}
+static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
#endif
#ifdef CONFIG_BLK_DEV_IDEACPI
@@ -1196,17 +1217,18 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
#endif
void ide_remove_port_from_hwgroup(ide_hwif_t *);
-extern int ide_hwif_request_regions(ide_hwif_t *hwif);
-extern void ide_hwif_release_regions(ide_hwif_t* hwif);
-void ide_unregister(unsigned int);
+void ide_unregister(ide_hwif_t *);
void ide_register_region(struct gendisk *);
void ide_unregister_region(struct gendisk *);
void ide_undecoded_slave(ide_drive_t *);
+void ide_port_apply_params(ide_hwif_t *);
+
int ide_device_add_all(u8 *idx, const struct ide_port_info *);
int ide_device_add(u8 idx[4], const struct ide_port_info *);
+int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
void ide_port_unregister_devices(ide_hwif_t *);
void ide_port_scan(ide_hwif_t *);
@@ -1315,29 +1337,28 @@ static inline void ide_set_irq(ide_drive_t *drive, int on)
{
ide_hwif_t *hwif = drive->hwif;
- hwif->OUTB(drive->ctl | (on ? 0 : 2),
- hwif->io_ports[IDE_CONTROL_OFFSET]);
+ hwif->OUTB(drive->ctl | (on ? 0 : 2), hwif->io_ports.ctl_addr);
}
static inline u8 ide_read_status(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- return hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+ return hwif->INB(hwif->io_ports.status_addr);
}
static inline u8 ide_read_altstatus(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- return hwif->INB(hwif->io_ports[IDE_CONTROL_OFFSET]);
+ return hwif->INB(hwif->io_ports.ctl_addr);
}
static inline u8 ide_read_error(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]);
+ return hwif->INB(hwif->io_ports.error_addr);
}
/*
@@ -1350,7 +1371,7 @@ static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount)
/* FIXME: use ->atapi_input_bytes */
while (bcount--)
- (void)hwif->INB(hwif->io_ports[IDE_DATA_OFFSET]);
+ (void)hwif->INB(hwif->io_ports.data_addr);
}
static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
@@ -1359,7 +1380,7 @@ static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
/* FIXME: use ->atapi_output_bytes */
while (bcount--)
- hwif->OUTB(0, hwif->io_ports[IDE_DATA_OFFSET]);
+ hwif->OUTB(0, hwif->io_ports.data_addr);
}
#endif /* _IDE_H */
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index c1ec04fd000..a281afeddfb 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -8,11 +8,18 @@
*/
#include <asm/types.h>
+#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <asm/kvm.h>
#define KVM_API_VERSION 12
+/* for KVM_TRACE_ENABLE */
+struct kvm_user_trace_setup {
+ __u32 buf_size; /* size of each per-cpu sub-buffer */
+ __u32 buf_nr; /* number of sub-buffers per cpu */
+};
+
/* for KVM_CREATE_MEMORY_REGION */
struct kvm_memory_region {
__u32 slot;
@@ -73,6 +80,9 @@ struct kvm_irqchip {
#define KVM_EXIT_INTR 10
#define KVM_EXIT_SET_TPR 11
#define KVM_EXIT_TPR_ACCESS 12
+#define KVM_EXIT_S390_SIEIC 13
+#define KVM_EXIT_S390_RESET 14
+#define KVM_EXIT_DCR 15
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
@@ -137,6 +147,27 @@ struct kvm_run {
__u32 is_write;
__u32 pad;
} tpr_access;
+ /* KVM_EXIT_S390_SIEIC */
+ struct {
+ __u8 icptcode;
+ __u64 mask; /* psw upper half */
+ __u64 addr; /* psw lower half */
+ __u16 ipa;
+ __u32 ipb;
+ } s390_sieic;
+ /* KVM_EXIT_S390_RESET */
+#define KVM_S390_RESET_POR 1
+#define KVM_S390_RESET_CLEAR 2
+#define KVM_S390_RESET_SUBSYSTEM 4
+#define KVM_S390_RESET_CPU_INIT 8
+#define KVM_S390_RESET_IPL 16
+ __u64 s390_reset_flags;
+ /* KVM_EXIT_DCR */
+ struct {
+ __u32 dcrn;
+ __u32 data;
+ __u8 is_write;
+ } dcr;
/* Fix the size of the union. */
char padding[256];
};
@@ -204,6 +235,74 @@ struct kvm_vapic_addr {
__u64 vapic_addr;
};
+/* for KVM_GET_MP_STATE and KVM_SET_MP_STATE */
+
+#define KVM_MP_STATE_RUNNABLE 0
+#define KVM_MP_STATE_UNINITIALIZED 1
+#define KVM_MP_STATE_INIT_RECEIVED 2
+#define KVM_MP_STATE_HALTED 3
+#define KVM_MP_STATE_SIPI_RECEIVED 4
+
+struct kvm_mp_state {
+ __u32 mp_state;
+};
+
+struct kvm_s390_psw {
+ __u64 mask;
+ __u64 addr;
+};
+
+/* valid values for type in kvm_s390_interrupt */
+#define KVM_S390_SIGP_STOP 0xfffe0000u
+#define KVM_S390_PROGRAM_INT 0xfffe0001u
+#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
+#define KVM_S390_RESTART 0xfffe0003u
+#define KVM_S390_INT_VIRTIO 0xffff2603u
+#define KVM_S390_INT_SERVICE 0xffff2401u
+#define KVM_S390_INT_EMERGENCY 0xffff1201u
+
+struct kvm_s390_interrupt {
+ __u32 type;
+ __u32 parm;
+ __u64 parm64;
+};
+
+#define KVM_TRC_SHIFT 16
+/*
+ * kvm trace categories
+ */
+#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
+#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) /* only 12 bits */
+
+/*
+ * kvm trace action
+ */
+#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
+#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
+#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
+
+#define KVM_TRC_HEAD_SIZE 12
+#define KVM_TRC_CYCLE_SIZE 8
+#define KVM_TRC_EXTRA_MAX 7
+
+/* This structure represents a single trace buffer record. */
+struct kvm_trace_rec {
+ __u32 event:28;
+ __u32 extra_u32:3;
+ __u32 cycle_in:1;
+ __u32 pid;
+ __u32 vcpu_id;
+ union {
+ struct {
+ __u32 cycle_lo, cycle_hi;
+ __u32 extra_u32[KVM_TRC_EXTRA_MAX];
+ } cycle;
+ struct {
+ __u32 extra_u32[KVM_TRC_EXTRA_MAX];
+ } nocycle;
+ } u;
+};
+
#define KVMIO 0xAE
/*
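A hedged userspace sketch of the new MP-state interface defined above; vcpu_fd is assumed to come from KVM_CREATE_VCPU, and a real caller would probe KVM_CAP_MP_STATE first:

/* Hedged sketch: query, then force a vcpu runnable. */
static void kick_uninitialized_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) == 0 &&
	    mp.mp_state == KVM_MP_STATE_UNINITIALIZED) {
		mp.mp_state = KVM_MP_STATE_RUNNABLE;
		ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
	}
}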
@@ -212,6 +311,8 @@ struct kvm_vapic_addr {
#define KVM_GET_API_VERSION _IO(KVMIO, 0x00)
#define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */
#define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list)
+
+#define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06)
/*
* Check if a kvm extension is available. Argument is extension number,
* return is 1 (yes) or 0 (no, sorry).
@@ -222,7 +323,12 @@ struct kvm_vapic_addr {
*/
#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-
+/*
+ * ioctls for kvm trace
+ */
+#define KVM_TRACE_ENABLE _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
+#define KVM_TRACE_PAUSE _IO(KVMIO, 0x07)
+#define KVM_TRACE_DISABLE _IO(KVMIO, 0x08)
/*
* Extension capability list.
*/
@@ -233,6 +339,13 @@ struct kvm_vapic_addr {
#define KVM_CAP_SET_TSS_ADDR 4
#define KVM_CAP_VAPIC 6
#define KVM_CAP_EXT_CPUID 7
+#define KVM_CAP_CLOCKSOURCE 8
+#define KVM_CAP_NR_VCPUS 9 /* returns max vcpus per vm */
+#define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */
+#define KVM_CAP_PIT 11
+#define KVM_CAP_NOP_IO_DELAY 12
+#define KVM_CAP_PV_MMU 13
+#define KVM_CAP_MP_STATE 14
/*
* ioctls for VM fds
@@ -255,6 +368,9 @@ struct kvm_vapic_addr {
#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
#define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip)
#define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip)
+#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
+#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
+#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
/*
* ioctls for vcpu fds
@@ -281,5 +397,17 @@ struct kvm_vapic_addr {
#define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
/* Available with KVM_CAP_VAPIC */
#define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
+/* valid for virtual machine (for floating interrupt) _and_ vcpu */
+#define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt)
+/* store status for s390 */
+#define KVM_S390_STORE_STATUS_NOADDR (-1ul)
+#define KVM_S390_STORE_STATUS_PREFIXED (-2ul)
+#define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long)
+/* initial ipl psw for s390 */
+#define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw)
+/* initial reset for s390 */
+#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
+#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
+#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
#endif
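The trace ioctls added above are driven against the /dev/kvm fd itself, not a VM or vcpu fd. A hedged userspace sketch, with buffer sizing purely illustrative; the records themselves are read back out of band:

/* Hedged sketch: enabling kvm tracing from userspace. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_kvm_trace(void)
{
	struct kvm_user_trace_setup kuts = {
		.buf_size = 4096,	/* per-cpu sub-buffer size */
		.buf_nr   = 64,		/* sub-buffers per cpu */
	};
	int fd = open("/dev/kvm", O_RDWR);

	if (fd < 0 || ioctl(fd, KVM_TRACE_ENABLE, &kuts) < 0)
		return -1;
	/* ... run guests; later ioctl(fd, KVM_TRACE_PAUSE) or
	 *     ioctl(fd, KVM_TRACE_DISABLE) stops collection ... */
	return fd;
}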
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 928b0d59e9b..398978972b7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
+#include <linux/marker.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -24,29 +25,18 @@
#include <asm/kvm_host.h>
-#define KVM_MAX_VCPUS 4
-#define KVM_MEMORY_SLOTS 8
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
-
-#define KVM_PIO_PAGE_OFFSET 1
-
/*
* vcpu->requests bit members
*/
#define KVM_REQ_TLB_FLUSH 0
#define KVM_REQ_MIGRATE_TIMER 1
#define KVM_REQ_REPORT_TPR_ACCESS 2
+#define KVM_REQ_MMU_RELOAD 3
+#define KVM_REQ_TRIPLE_FAULT 4
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
-struct kvm_guest_debug {
- int enabled;
- unsigned long bp[4];
- int singlestep;
-};
-
/*
* It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
@@ -67,7 +57,9 @@ void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
struct kvm_vcpu {
struct kvm *kvm;
+#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier preempt_notifier;
+#endif
int vcpu_id;
struct mutex mutex;
int cpu;
@@ -100,6 +92,10 @@ struct kvm_memory_slot {
unsigned long flags;
unsigned long *rmap;
unsigned long *dirty_bitmap;
+ struct {
+ unsigned long rmap_pde;
+ int write_count;
+ } *lpage_info;
unsigned long userspace_addr;
int user_alloc;
};
@@ -114,11 +110,11 @@ struct kvm {
KVM_PRIVATE_MEM_SLOTS];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
struct list_head vm_list;
- struct file *filp;
struct kvm_io_bus mmio_bus;
struct kvm_io_bus pio_bus;
struct kvm_vm_stat stat;
struct kvm_arch arch;
+ atomic_t users_count;
};
/* The guest did something we don't support. */
@@ -145,14 +141,19 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
struct module *module);
void kvm_exit(void);
+void kvm_get_kvm(struct kvm *kvm);
+void kvm_put_kvm(struct kvm *kvm);
+
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
extern struct page *bad_page;
+extern pfn_t bad_pfn;
int is_error_page(struct page *page);
+int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
@@ -166,8 +167,19 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
+void kvm_set_page_dirty(struct page *page);
+void kvm_set_page_accessed(struct page *page);
+
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+void kvm_release_pfn_dirty(pfn_t);
+void kvm_release_pfn_clean(pfn_t pfn);
+void kvm_set_pfn_dirty(pfn_t pfn);
+void kvm_set_pfn_accessed(pfn_t pfn);
+void kvm_get_pfn(pfn_t pfn);
+
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -188,6 +200,7 @@ void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_reload_remote_mmus(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -223,6 +236,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs);
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state);
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
@@ -255,6 +272,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
static inline void kvm_guest_enter(void)
@@ -296,5 +314,18 @@ struct kvm_stats_debugfs_item {
struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
+extern struct dentry *kvm_debugfs_dir;
+
+#ifdef CONFIG_KVM_TRACE
+int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
+void kvm_trace_cleanup(void);
+#else
+static inline
+int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+#define kvm_trace_cleanup() ((void)0)
+#endif
#endif
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 5497aac0d2f..3ddce03766c 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -11,8 +11,11 @@
/* Return values for hypercalls */
#define KVM_ENOSYS 1000
+#define KVM_EFAULT EFAULT
+#define KVM_E2BIG E2BIG
-#define KVM_HC_VAPIC_POLL_IRQ 1
+#define KVM_HC_VAPIC_POLL_IRQ 1
+#define KVM_HC_MMU_OP 2
/*
* hypercalls use architecture specific
@@ -20,6 +23,12 @@
#include <asm/kvm_para.h>
#ifdef __KERNEL__
+#ifdef CONFIG_KVM_GUEST
+void __init kvm_guest_init(void);
+#else
+#define kvm_guest_init() do { } while (0)
+#endif
+
static inline int kvm_para_has_feature(unsigned int feature)
{
if (kvm_arch_para_features() & (1UL << feature))
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 1c4e46decb2..9b6f395c962 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -38,6 +38,8 @@ typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef unsigned long hfn_t;
+typedef hfn_t pfn_t;
+
struct kvm_pio_request {
unsigned long count;
int cur_count;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ff7df1a2222..9fa1a8002ce 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -208,6 +208,38 @@ struct mlx4_mtt {
int page_shift;
};
+enum {
+ MLX4_DB_PER_PAGE = PAGE_SIZE / 4
+};
+
+struct mlx4_db_pgdir {
+ struct list_head list;
+ DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
+ DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
+ unsigned long *bits[2];
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
+struct mlx4_ib_user_db_page;
+
+struct mlx4_db {
+ __be32 *db;
+ union {
+ struct mlx4_db_pgdir *pgdir;
+ struct mlx4_ib_user_db_page *user_page;
+ } u;
+ dma_addr_t dma;
+ int index;
+ int order;
+};
+
+struct mlx4_hwq_resources {
+ struct mlx4_db db;
+ struct mlx4_mtt mtt;
+ struct mlx4_buf buf;
+};
+
struct mlx4_mr {
struct mlx4_mtt mtt;
u64 iova;
@@ -341,6 +373,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_buf *buf);
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+ int size, int max_direct);
+void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
+ int size);
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
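mlx4_hwq_resources bundles the three allocations every mlx4 work queue needs (buffer, MTT and doorbell record), and the alloc/free helpers keep them paired. A hedged usage sketch; only the mlx4_* calls declared above are from this patch, mdev/nent/stride and the function name are placeholders:

/* Hedged sketch: one-call setup of a work queue's backing resources. */
static int foo_create_queue(struct mlx4_dev *mdev, int nent, int stride,
			    struct mlx4_hwq_resources *wqres)
{
	int err = mlx4_alloc_hwq_res(mdev, wqres, nent * stride,
				     2 * PAGE_SIZE /* max_direct */);
	if (err)
		return err;
	/* wqres->buf, wqres->mtt and wqres->db are now ready for the
	 * hardware context; teardown is the mirror image:
	 *	mlx4_free_hwq_res(mdev, wqres, nent * stride); */
	return 0;
}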
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index a5e43febee4..7f128b266fa 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -296,6 +296,10 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
struct mlx4_qp_context *context);
+int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ struct mlx4_qp_context *context,
+ struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
+
static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
{
return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b695875d63e..286d3152160 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1229,6 +1229,7 @@ void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
unsigned long pages, int node);
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+void vmemmap_populate_print_last(void);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d0bd97044ab..9a4f3e63e3b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1798,6 +1798,8 @@ extern void mmput(struct mm_struct *);
extern struct mm_struct *get_task_mm(struct task_struct *task);
 /* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Allocate a new mm structure and copy contents from tsk->mm */
+extern struct mm_struct *dup_mm(struct task_struct *tsk);
extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 03378e3515b..add3c5a4082 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -32,7 +32,7 @@ struct attribute {
struct attribute_group {
const char *name;
- int (*is_visible)(struct kobject *,
+ mode_t (*is_visible)(struct kobject *,
struct attribute *, int);
struct attribute **attrs;
};
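Returning a mode_t instead of an int lets is_visible not only hide an attribute (return 0) but also adjust its permissions per object. A hedged sketch; only the sysfs types are from this patch, the foo_* names and foo_has_feature() are invented:

/* Hedged sketch of a per-object visibility callback. */
static mode_t foo_attr_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!foo_has_feature(kobj, index))	/* invented predicate */
		return 0;			/* hide this attribute */
	return attr->mode;			/* keep declared mode */
}

static struct attribute_group foo_group = {
	.is_visible	= foo_attr_is_visible,
	.attrs		= foo_attrs,		/* invented array */
};
/* sysfs_update_group() (added below) re-evaluates is_visible on an
 * already-registered group. */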
@@ -105,6 +105,8 @@ void sysfs_remove_link(struct kobject *kobj, const char *name);
int __must_check sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp);
+int sysfs_update_group(struct kobject *kobj,
+ const struct attribute_group *grp);
void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp);
int sysfs_add_file_to_group(struct kobject *kobj,
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b8b19e2f57b..f6a9fe0ef09 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -181,7 +181,8 @@ struct scsi_device {
sdev_printk(prefix, (scmd)->device, fmt, ##a)
enum scsi_target_state {
- STARGET_RUNNING = 1,
+ STARGET_CREATED = 1,
+ STARGET_RUNNING,
STARGET_DEL,
};
diff --git a/kernel/exit.c b/kernel/exit.c
index cece89f80ab..97f609f574b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -507,10 +507,9 @@ void put_files_struct(struct files_struct *files)
}
}
-EXPORT_SYMBOL(put_files_struct);
-
-void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
+void reset_files_struct(struct files_struct *files)
{
+ struct task_struct *tsk = current;
struct files_struct *old;
old = tsk->files;
@@ -519,7 +518,6 @@ void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
task_unlock(tsk);
put_files_struct(old);
}
-EXPORT_SYMBOL(reset_files_struct);
void exit_files(struct task_struct *tsk)
{
diff --git a/kernel/fork.c b/kernel/fork.c
index 89fe414645e..c674aa8d3c3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -521,7 +521,7 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
* Allocate a new mm structure and copy contents from the
* mm structure of the passed in task structure.
*/
-static struct mm_struct *dup_mm(struct task_struct *tsk)
+struct mm_struct *dup_mm(struct task_struct *tsk)
{
struct mm_struct *mm, *oldmm = current->mm;
int err;
@@ -805,12 +805,6 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
goto out;
}
- /*
- * Note: we may be using current for both targets (See exec.c)
- * This works because we cache current->files (old) as oldf. Don't
- * break this.
- */
- tsk->files = NULL;
newf = dup_fd(oldf, &error);
if (!newf)
goto out;
@@ -846,34 +840,6 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
return 0;
}
-/*
- * Helper to unshare the files of the current task.
- * We don't want to expose copy_files internals to
- * the exec layer of the kernel.
- */
-
-int unshare_files(void)
-{
- struct files_struct *files = current->files;
- int rc;
-
- BUG_ON(!files);
-
- /* This can race but the race causes us to copy when we don't
- need to and drop the copy */
- if(atomic_read(&files->count) == 1)
- {
- atomic_inc(&files->count);
- return 0;
- }
- rc = copy_files(0, current);
- if(rc)
- current->files = files;
- return rc;
-}
-
-EXPORT_SYMBOL(unshare_files);
-
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
struct sighand_struct *sig;
@@ -1811,3 +1777,27 @@ bad_unshare_cleanup_thread:
bad_unshare_out:
return err;
}
+
+/*
+ * Helper to unshare the files of the current task.
+ * We don't want to expose copy_files internals to
+ * the exec layer of the kernel.
+ */
+
+int unshare_files(struct files_struct **displaced)
+{
+ struct task_struct *task = current;
+ struct files_struct *copy = NULL;
+ int error;
+
+ error = unshare_fd(CLONE_FILES, &copy);
+ if (error || !copy) {
+ *displaced = NULL;
+ return error;
+ }
+ *displaced = task->files;
+ task_lock(task);
+ task->files = copy;
+ task_unlock(task);
+ return 0;
+}
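The reworked helper hands the displaced table back to the caller instead of dropping it itself, which lets an exec-time caller delay the final put until past the point of no return. A hedged sketch of the intended calling pattern:

/* Hedged sketch of the new calling convention. */
struct files_struct *displaced;
int retval = unshare_files(&displaced);

if (retval)
	return retval;
/* ... work that may still fail runs on the private table ... */
/* past the point of no return: */
if (displaced)
	put_files_struct(displaced);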
diff --git a/lib/Kconfig b/lib/Kconfig
index 2d53dc092e8..8cc8e8722a3 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -7,6 +7,12 @@ menu "Library routines"
config BITREVERSE
tristate
+config GENERIC_FIND_FIRST_BIT
+ def_bool n
+
+config GENERIC_FIND_NEXT_BIT
+ def_bool n
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Makefile b/lib/Makefile
index bf8000fc7d4..2d7001b7f5a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index 78ccd73a884..d3f5784807b 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -16,14 +16,12 @@
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
+/*
+ * Find the next set bit in a memory region.
*/
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+unsigned long __find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -60,15 +58,14 @@ found_first:
found_middle:
return result + __ffs(tmp);
}
-
-EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(__find_next_bit);
/*
* This implementation of find_{first,next}_zero_bit was stolen from
* Linus' asm-alpha/bitops.h.
*/
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+unsigned long __find_next_zero_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -105,8 +102,64 @@ found_first:
found_middle:
return result + ffz(tmp);
}
+EXPORT_SYMBOL(__find_next_zero_bit);
+#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long __find_first_bit(const unsigned long *addr,
+ unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
-EXPORT_SYMBOL(find_next_zero_bit);
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(__find_first_bit);
+
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long __find_first_zero_bit(const unsigned long *addr,
+ unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if (~(tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) | (~0UL << size);
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found:
+ return result + ffz(tmp);
+}
+EXPORT_SYMBOL(__find_first_zero_bit);
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
#ifdef __BIG_ENDIAN
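The __-prefixed routines are what architectures without their own bit scanners now share; the find_first_bit()/find_next_bit() wrappers around them live in the corresponding headers. A hedged kernel-context sketch of what they compute, on a two-word bitmap:

/* Hedged sketch; assumes <linux/bitmap.h> and <linux/bitops.h>. */
DECLARE_BITMAP(map, 2 * BITS_PER_LONG);
unsigned long n;

bitmap_zero(map, 2 * BITS_PER_LONG);
__set_bit(3, map);
__set_bit(BITS_PER_LONG + 1, map);

n = find_first_bit(map, 2 * BITS_PER_LONG);		/* -> 3 */
n = find_next_bit(map, 2 * BITS_PER_LONG, n + 1);	/* -> BITS_PER_LONG + 1 */
n = find_first_zero_bit(map, 2 * BITS_PER_LONG);	/* -> 0 */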
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 2ccea700968..b6791646143 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -111,44 +111,74 @@ static unsigned long __init init_bootmem_core(pg_data_t *pgdat,
* might be used for boot-time allocations - or it might get added
* to the free page pool later on.
*/
-static int __init reserve_bootmem_core(bootmem_data_t *bdata,
+static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
unsigned long addr, unsigned long size, int flags)
{
unsigned long sidx, eidx;
unsigned long i;
- int ret;
+
+ BUG_ON(!size);
+
+ /* out of range: does not overlap this node, nothing to check */
+ if (addr + size < bdata->node_boot_start ||
+ PFN_DOWN(addr) > bdata->node_low_pfn)
+ return 0;
/*
- * round up, partially reserved pages are considered
- * fully reserved.
+ * Convert the range to bitmap indices, clamped to this node.
*/
+ if (addr > bdata->node_boot_start)
+ sidx = PFN_DOWN(addr - bdata->node_boot_start);
+ else
+ sidx = 0;
+
+ eidx = PFN_UP(addr + size - bdata->node_boot_start);
+ if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
+ eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+
+ for (i = sidx; i < eidx; i++) {
+ if (test_bit(i, bdata->node_bootmem_map)) {
+ if (flags & BOOTMEM_EXCLUSIVE)
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+
+}
+
+static void __init reserve_bootmem_core(bootmem_data_t *bdata,
+ unsigned long addr, unsigned long size, int flags)
+{
+ unsigned long sidx, eidx;
+ unsigned long i;
+
BUG_ON(!size);
- BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn);
- BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn);
- BUG_ON(addr < bdata->node_boot_start);
- sidx = PFN_DOWN(addr - bdata->node_boot_start);
+ /* out of range */
+ if (addr + size < bdata->node_boot_start ||
+ PFN_DOWN(addr) > bdata->node_low_pfn)
+ return;
+
+ /*
+ * Convert the range to bitmap indices, clamped to this node.
+ */
+ if (addr > bdata->node_boot_start)
+ sidx = PFN_DOWN(addr - bdata->node_boot_start);
+ else
+ sidx = 0;
+
eidx = PFN_UP(addr + size - bdata->node_boot_start);
+ if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
+ eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
- for (i = sidx; i < eidx; i++)
+ for (i = sidx; i < eidx; i++) {
if (test_and_set_bit(i, bdata->node_bootmem_map)) {
#ifdef CONFIG_DEBUG_BOOTMEM
printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
#endif
- if (flags & BOOTMEM_EXCLUSIVE) {
- ret = -EBUSY;
- goto err;
- }
}
-
- return 0;
-
-err:
- /* unreserve memory we accidentally reserved */
- for (i--; i >= sidx; i--)
- clear_bit(i, bdata->node_bootmem_map);
-
- return ret;
+ }
}
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
@@ -206,9 +236,11 @@ void * __init
__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
unsigned long align, unsigned long goal, unsigned long limit)
{
- unsigned long offset, remaining_size, areasize, preferred;
+ unsigned long areasize, preferred;
unsigned long i, start = 0, incr, eidx, end_pfn;
void *ret;
+ unsigned long node_boot_start;
+ void *node_bootmem_map;
if (!size) {
printk("__alloc_bootmem_core(): zero-sized request\n");
@@ -216,70 +248,83 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
}
BUG_ON(align & (align-1));
- if (limit && bdata->node_boot_start >= limit)
- return NULL;
-
/* on nodes without memory - bootmem_map is NULL */
if (!bdata->node_bootmem_map)
return NULL;
+ /* bdata->node_boot_start is expected to be (12+6)-bit aligned on x86_64? */
+ node_boot_start = bdata->node_boot_start;
+ node_bootmem_map = bdata->node_bootmem_map;
+ if (align) {
+ node_boot_start = ALIGN(bdata->node_boot_start, align);
+ if (node_boot_start > bdata->node_boot_start)
+ node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
+ PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
+ }
+
+ if (limit && node_boot_start >= limit)
+ return NULL;
+
end_pfn = bdata->node_low_pfn;
limit = PFN_DOWN(limit);
if (limit && end_pfn > limit)
end_pfn = limit;
- eidx = end_pfn - PFN_DOWN(bdata->node_boot_start);
- offset = 0;
- if (align && (bdata->node_boot_start & (align - 1UL)) != 0)
- offset = align - (bdata->node_boot_start & (align - 1UL));
- offset = PFN_DOWN(offset);
+ eidx = end_pfn - PFN_DOWN(node_boot_start);
/*
* We try to allocate bootmem pages above 'goal'
* first, then we try to allocate lower pages.
*/
- if (goal && goal >= bdata->node_boot_start && PFN_DOWN(goal) < end_pfn) {
- preferred = goal - bdata->node_boot_start;
+ preferred = 0;
+ if (goal && PFN_DOWN(goal) < end_pfn) {
+ if (goal > node_boot_start)
+ preferred = goal - node_boot_start;
- if (bdata->last_success >= preferred)
+ if (bdata->last_success > node_boot_start &&
+ bdata->last_success - node_boot_start >= preferred)
if (!limit || (limit && limit > bdata->last_success))
- preferred = bdata->last_success;
- } else
- preferred = 0;
+ preferred = bdata->last_success - node_boot_start;
+ }
- preferred = PFN_DOWN(ALIGN(preferred, align)) + offset;
+ preferred = PFN_DOWN(ALIGN(preferred, align));
areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
incr = align >> PAGE_SHIFT ? : 1;
restart_scan:
- for (i = preferred; i < eidx; i += incr) {
+ for (i = preferred; i < eidx;) {
unsigned long j;
- i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
+
+ i = find_next_zero_bit(node_bootmem_map, eidx, i);
i = ALIGN(i, incr);
if (i >= eidx)
break;
- if (test_bit(i, bdata->node_bootmem_map))
+ if (test_bit(i, node_bootmem_map)) {
+ i += incr;
continue;
+ }
for (j = i + 1; j < i + areasize; ++j) {
if (j >= eidx)
goto fail_block;
- if (test_bit(j, bdata->node_bootmem_map))
+ if (test_bit(j, node_bootmem_map))
goto fail_block;
}
start = i;
goto found;
fail_block:
i = ALIGN(j, incr);
+ if (i == j)
+ i += incr;
}
- if (preferred > offset) {
- preferred = offset;
+ if (preferred > 0) {
+ preferred = 0;
goto restart_scan;
}
return NULL;
found:
- bdata->last_success = PFN_PHYS(start);
+ bdata->last_success = PFN_PHYS(start) + node_boot_start;
BUG_ON(start >= eidx);
/*
@@ -289,6 +334,7 @@ found:
*/
if (align < PAGE_SIZE &&
bdata->last_offset && bdata->last_pos+1 == start) {
+ unsigned long offset, remaining_size;
offset = ALIGN(bdata->last_offset, align);
BUG_ON(offset > PAGE_SIZE);
remaining_size = PAGE_SIZE - offset;
@@ -297,14 +343,12 @@ found:
/* last_pos unchanged */
bdata->last_offset = offset + size;
ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
- offset +
- bdata->node_boot_start);
+ offset + node_boot_start);
} else {
remaining_size = size - remaining_size;
areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
- offset +
- bdata->node_boot_start);
+ offset + node_boot_start);
bdata->last_pos = start + areasize - 1;
bdata->last_offset = remaining_size;
}
@@ -312,14 +356,14 @@ found:
} else {
bdata->last_pos = start + areasize - 1;
bdata->last_offset = size & ~PAGE_MASK;
- ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start);
+ ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
}
/*
* Reserve the area now:
*/
for (i = start; i < start + areasize; i++)
- if (unlikely(test_and_set_bit(i, bdata->node_bootmem_map)))
+ if (unlikely(test_and_set_bit(i, node_bootmem_map)))
BUG();
memset(ret, 0, size);
return ret;
@@ -401,6 +445,11 @@ unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size, int flags)
{
+ int ret;
+
+ ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
+ if (ret < 0)
+ return;
reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
}
@@ -426,7 +475,18 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
int __init reserve_bootmem(unsigned long addr, unsigned long size,
int flags)
{
- return reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size, flags);
+ bootmem_data_t *bdata;
+ int ret;
+
+ list_for_each_entry(bdata, &bdata_list, list) {
+ ret = can_reserve_bootmem_core(bdata, addr, size, flags);
+ if (ret < 0)
+ return ret;
+ }
+ list_for_each_entry(bdata, &bdata_list, list)
+ reserve_bootmem_core(bdata, addr, size, flags);
+
+ return 0;
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
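The net effect above is a two-phase reserve: every node is checked with can_reserve_bootmem_core() before any bit is set, so a failing BOOTMEM_EXCLUSIVE request no longer needs the old clear_bit() unwind. For the alignment rework in __alloc_bootmem_core(), a worked example under assumed values:

/* Worked example (values assumed for illustration): with
 *	bdata->node_boot_start = 0x00100000  (1 MiB)
 *	align                  = 0x00400000  (4 MiB)
 * the scan base becomes
 *	node_boot_start = ALIGN(0x00100000, 0x00400000) = 0x00400000
 * and node_bootmem_map is advanced by
 *	PFN_DOWN(0x00400000 - 0x00100000) / BITS_PER_LONG
 *	= 768 / 64 = 12 words (on 64-bit),
 * so bit 0 of the adjusted map is already align-correct and the old
 * 'offset' fixup drops out of the index arithmetic. */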
diff --git a/mm/rmap.c b/mm/rmap.c
index 997f06907b6..e9bb6b1093f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -413,9 +413,6 @@ int page_referenced(struct page *page, int is_locked,
{
int referenced = 0;
- if (page_test_and_clear_young(page))
- referenced++;
-
if (TestClearPageReferenced(page))
referenced++;
@@ -433,6 +430,10 @@ int page_referenced(struct page *page, int is_locked,
unlock_page(page);
}
}
+
+ if (page_test_and_clear_young(page))
+ referenced++;
+
return referenced;
}
diff --git a/mm/sparse.c b/mm/sparse.c
index 98d6b39c347..7e9191381f8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -295,6 +295,9 @@ struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
return NULL;
}
+void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
+{
+}
/*
* Allocate the accumulated non-linear sections, allocate a mem_map
* for each and record the physical to section mapping.
@@ -304,22 +307,50 @@ void __init sparse_init(void)
unsigned long pnum;
struct page *map;
unsigned long *usemap;
+ unsigned long **usemap_map;
+ int size;
+
+ /*
+ * mem_map is allocated from big pages (2M on x86-64) while a
+ * usemap is far smaller than a page (24 bytes), so allocating
+ * 2M (2M-aligned) and 24 bytes alternately makes each next 2M
+ * chunk slip one further 2M out; on a big system the memory
+ * ends up riddled with holes.  Try to allocate the 2M pages
+ * contiguously here instead.
+ *
+ * powerpc needs to call sparse_init_one_section right after each
+ * sparse_early_mem_map_alloc, so allocate usemap_map first.
+ */
+ size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
+ usemap_map = alloc_bootmem(size);
+ if (!usemap_map)
+ panic("can not allocate usemap_map\n");
for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
if (!present_section_nr(pnum))
continue;
+ usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
+ }
- map = sparse_early_mem_map_alloc(pnum);
- if (!map)
+ for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+ if (!present_section_nr(pnum))
continue;
- usemap = sparse_early_usemap_alloc(pnum);
+ usemap = usemap_map[pnum];
if (!usemap)
continue;
+ map = sparse_early_mem_map_alloc(pnum);
+ if (!map)
+ continue;
+
sparse_init_one_section(__nr_to_section(pnum), pnum, map,
usemap);
}
+
+ vmemmap_populate_print_last();
+
+ free_bootmem(__pa(usemap_map), size);
}
#ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 742003d3a84..9ee3affab34 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/jhash.h>
+#include <asm/unaligned.h>
#include "ieee80211_i.h"
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 02de8f1522a..3df809222d1 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -7,7 +7,6 @@
* published by the Free Software Foundation.
*/
-#include <asm/unaligned.h>
#include "mesh.h"
#define TEST_FRAME_LEN 8192
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 24b3c8fe6bc..a098a0454dc 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -76,7 +76,7 @@ modpost = scripts/mod/modpost \
$(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
$(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
$(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
- $(if $(iKBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(EXTRA_SYMBOLS))) \
$(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
$(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
$(if $(CONFIG_MARKERS),-K $(kernelmarkersfile)) \
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index f8b42ab0724..757294b4f32 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1552,10 +1552,10 @@ static void read_symbols(char *modname)
}
license = get_modinfo(info.modinfo, info.modinfo_len, "license");
- if (!license && !is_vmlinux(modname))
- fatal("modpost: missing MODULE_LICENSE() in %s\n"
- "see include/linux/module.h for "
- "more information\n", modname);
+ if (info.modinfo && !license && !is_vmlinux(modname))
+ warn("modpost: missing MODULE_LICENSE() in %s\n"
+ "see include/linux/module.h for "
+ "more information\n", modname);
while (license) {
if (license_is_gpl_compatible(license))
mod->gpl_compatible = 1;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b2e12893e3f..c82cf15730a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -40,6 +40,7 @@
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
+#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -59,7 +60,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
static __read_mostly struct preempt_ops kvm_preempt_ops;
-static struct dentry *debugfs_dir;
+struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
@@ -119,6 +120,29 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
+void kvm_reload_remote_mmus(struct kvm *kvm)
+{
+ int i, cpu;
+ cpumask_t cpus;
+ struct kvm_vcpu *vcpu;
+
+ cpus_clear(cpus);
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ vcpu = kvm->vcpus[i];
+ if (!vcpu)
+ continue;
+ if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+ continue;
+ cpu = vcpu->cpu;
+ if (cpu != -1 && cpu != raw_smp_processor_id())
+ cpu_set(cpu, cpus);
+ }
+ if (cpus_empty(cpus))
+ return;
+ smp_call_function_mask(cpus, ack_flush, NULL, 1);
+}
+
+
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
struct page *page;
@@ -170,6 +194,7 @@ static struct kvm *kvm_create_vm(void)
mutex_init(&kvm->lock);
kvm_io_bus_init(&kvm->mmio_bus);
init_rwsem(&kvm->slots_lock);
+ atomic_set(&kvm->users_count, 1);
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
@@ -189,9 +214,13 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
vfree(free->dirty_bitmap);
+ if (!dont || free->lpage_info != dont->lpage_info)
+ vfree(free->lpage_info);
+
free->npages = 0;
free->dirty_bitmap = NULL;
free->rmap = NULL;
+ free->lpage_info = NULL;
}
void kvm_free_physmem(struct kvm *kvm)
@@ -215,11 +244,25 @@ static void kvm_destroy_vm(struct kvm *kvm)
mmdrop(mm);
}
+void kvm_get_kvm(struct kvm *kvm)
+{
+ atomic_inc(&kvm->users_count);
+}
+EXPORT_SYMBOL_GPL(kvm_get_kvm);
+
+void kvm_put_kvm(struct kvm *kvm)
+{
+ if (atomic_dec_and_test(&kvm->users_count))
+ kvm_destroy_vm(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_put_kvm);
+
+
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
struct kvm *kvm = filp->private_data;
- kvm_destroy_vm(kvm);
+ kvm_put_kvm(kvm);
return 0;
}
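With users_count in place, the VM is destroyed when its last reference drops, not when the vm fd closes; each vcpu fd pins the VM (see the create_vcpu_fd and kvm_vcpu_release hunks below). A condensed, hedged sketch of the lifetime rule:

/* Sketch only: condenses the reference flow spread across this file. */
kvm_get_kvm(kvm);		/* taken before handing out a vcpu fd */
r = create_vcpu_fd(vcpu);	/* on failure it calls kvm_put_kvm() */
/* ... */
kvm_put_kvm(vcpu->kvm);		/* vcpu fd release drops its pin */
kvm_put_kvm(kvm);		/* vm fd release drops the original ref */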
@@ -301,6 +344,25 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
}
+ if (npages && !new.lpage_info) {
+ int largepages = npages / KVM_PAGES_PER_HPAGE;
+ if (npages % KVM_PAGES_PER_HPAGE)
+ largepages++;
+ if (base_gfn % KVM_PAGES_PER_HPAGE)
+ largepages++;
+
+ new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
+
+ if (!new.lpage_info)
+ goto out_free;
+
+ memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
+
+ if (base_gfn % KVM_PAGES_PER_HPAGE)
+ new.lpage_info[0].write_count = 1;
+ if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
+ new.lpage_info[largepages-1].write_count = 1;
+ }
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
@@ -397,6 +459,12 @@ int is_error_page(struct page *page)
}
EXPORT_SYMBOL_GPL(is_error_page);
+int is_error_pfn(pfn_t pfn)
+{
+ return pfn == bad_pfn;
+}
+EXPORT_SYMBOL_GPL(is_error_pfn);
+
static inline unsigned long bad_hva(void)
{
return PAGE_OFFSET;
@@ -444,7 +512,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
-static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *slot;
@@ -458,7 +526,7 @@ static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
/*
* Requires current->mm->mmap_sem to be held
*/
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
struct page *page[1];
unsigned long addr;
@@ -469,7 +537,7 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr)) {
get_page(bad_page);
- return bad_page;
+ return page_to_pfn(bad_page);
}
npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
@@ -477,27 +545,71 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
if (npages != 1) {
get_page(bad_page);
- return bad_page;
+ return page_to_pfn(bad_page);
}
- return page[0];
+ return page_to_pfn(page[0]);
+}
+
+EXPORT_SYMBOL_GPL(gfn_to_pfn);
+
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+ return pfn_to_page(gfn_to_pfn(kvm, gfn));
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page_clean(struct page *page)
{
- put_page(page);
+ kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);
+void kvm_release_pfn_clean(pfn_t pfn)
+{
+ put_page(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
+
void kvm_release_page_dirty(struct page *page)
{
+ kvm_release_pfn_dirty(page_to_pfn(page));
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+
+void kvm_release_pfn_dirty(pfn_t pfn)
+{
+ kvm_set_pfn_dirty(pfn);
+ kvm_release_pfn_clean(pfn);
+}
+EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
+
+void kvm_set_page_dirty(struct page *page)
+{
+ kvm_set_pfn_dirty(page_to_pfn(page));
+}
+EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
+
+void kvm_set_pfn_dirty(pfn_t pfn)
+{
+ struct page *page = pfn_to_page(pfn);
if (!PageReserved(page))
SetPageDirty(page);
- put_page(page);
}
-EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
+
+void kvm_set_pfn_accessed(pfn_t pfn)
+{
+ mark_page_accessed(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
+
+void kvm_get_pfn(pfn_t pfn)
+{
+ get_page(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_get_pfn);
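The pfn-based helpers above mirror the page-based ones, keeping the struct page conversions at the edges so MMU code can work in pfns throughout. A hedged sketch of the consuming idiom; 'dirty' is a placeholder condition:

/* Hedged sketch of an MMU-side consumer. */
pfn_t pfn = gfn_to_pfn(kvm, gfn);

if (is_error_pfn(pfn)) {
	kvm_release_pfn_clean(pfn);
	return -EFAULT;
}
/* ... install pfn into the shadow page tables ... */
if (dirty)
	kvm_release_pfn_dirty(pfn);	/* SetPageDirty + put */
else
	kvm_release_pfn_clean(pfn);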
static int next_segment(unsigned long len, int offset)
{
@@ -554,7 +666,9 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
+ pagefault_disable();
r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
+ pagefault_enable();
if (r)
return -EFAULT;
return 0;
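
Note: __copy_from_user_inatomic() must not sleep, so the copy is now bracketed by pagefault_disable()/pagefault_enable(); a fault makes it return nonzero immediately instead of blocking. A caller that may sleep would then retry via the ordinary path, roughly (hypothetical caller):

	r = kvm_read_guest_atomic(kvm, gpa, buf, len);
	if (r == -EFAULT)
		r = kvm_read_guest(kvm, gpa, buf, len);	/* may sleep, faults pages in */
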
@@ -651,6 +765,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
* We will block until either an interrupt or a signal wakes us up
*/
while (!kvm_cpu_has_interrupt(vcpu)
+ && !kvm_cpu_has_pending_timer(vcpu)
&& !signal_pending(current)
&& !kvm_arch_vcpu_runnable(vcpu)) {
set_current_state(TASK_INTERRUPTIBLE);
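
Note: kvm_vcpu_block() now also wakes when a timer tick is pending. For context, the condition sits inside the kernel's standard interruptible-wait loop, which in sketch form (wake_condition() is a hypothetical stand-in for the checks above) looks like:

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (wake_condition())
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
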
@@ -678,8 +793,10 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (vmf->pgoff == 0)
page = virt_to_page(vcpu->run);
+#ifdef CONFIG_X86
else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
page = virt_to_page(vcpu->arch.pio_data);
+#endif
else
return VM_FAULT_SIGBUS;
get_page(page);
@@ -701,11 +818,11 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
struct kvm_vcpu *vcpu = filp->private_data;
- fput(vcpu->kvm->filp);
+ kvm_put_kvm(vcpu->kvm);
return 0;
}
-static struct file_operations kvm_vcpu_fops = {
+static const struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
.compat_ioctl = kvm_vcpu_ioctl,
@@ -723,9 +840,10 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
r = anon_inode_getfd(&fd, &inode, &file,
"kvm-vcpu", &kvm_vcpu_fops, vcpu);
- if (r)
+ if (r) {
+ kvm_put_kvm(vcpu->kvm);
return r;
- atomic_inc(&vcpu->kvm->filp->f_count);
+ }
return fd;
}
@@ -760,6 +878,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
mutex_unlock(&kvm->lock);
/* Now it's all set up, let userspace reach it */
+ kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
if (r < 0)
goto unlink;
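
Note: these two hunks replace the raw f_count bump with proper kvm refcounting. The reference is taken before the vcpu fd is created; once anon_inode_getfd() succeeds the fd owns it, so both the failure path in create_vcpu_fd() and kvm_vcpu_release() drop it via kvm_put_kvm(). In sketch form, the pairing is:

	kvm_get_kvm(kvm);		/* ref held on behalf of the new fd */
	r = create_vcpu_fd(vcpu);	/* on failure, drops the ref itself */
	if (r < 0)
		goto unlink;		/* ref already dropped; just unwind */
	/* later: close(fd) -> kvm_vcpu_release() -> kvm_put_kvm() */
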
@@ -802,28 +921,39 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
break;
case KVM_GET_REGS: {
- struct kvm_regs kvm_regs;
+ struct kvm_regs *kvm_regs;
- memset(&kvm_regs, 0, sizeof kvm_regs);
- r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
- if (r)
+ r = -ENOMEM;
+ kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
+ if (!kvm_regs)
goto out;
+ r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
+ if (r)
+ goto out_free1;
r = -EFAULT;
- if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
- goto out;
+ if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
+ goto out_free1;
r = 0;
+out_free1:
+ kfree(kvm_regs);
break;
}
case KVM_SET_REGS: {
- struct kvm_regs kvm_regs;
+ struct kvm_regs *kvm_regs;
- r = -EFAULT;
- if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
+ r = -ENOMEM;
+ kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
+ if (!kvm_regs)
goto out;
- r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
+ r = -EFAULT;
+ if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
+ goto out_free2;
+ r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
if (r)
- goto out;
+ goto out_free2;
r = 0;
+out_free2:
+ kfree(kvm_regs);
break;
}
case KVM_GET_SREGS: {
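
Note: struct kvm_regs is large enough that keeping it on the (4 KB or 8 KB) kernel stack is risky, so KVM_GET_REGS/KVM_SET_REGS now allocate it with kzalloc(). The same pattern in isolation:

	struct kvm_regs *regs = kzalloc(sizeof(*regs), GFP_KERNEL);
	if (!regs)
		return -ENOMEM;
	if (copy_from_user(regs, argp, sizeof(*regs))) {
		kfree(regs);
		return -EFAULT;
	}
	/* ... hand *regs to the arch code ... */
	kfree(regs);
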
@@ -851,6 +981,30 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = 0;
break;
}
+ case KVM_GET_MP_STATE: {
+ struct kvm_mp_state mp_state;
+
+ r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
+ if (r)
+ goto out;
+ r = -EFAULT;
+ if (copy_to_user(argp, &mp_state, sizeof mp_state))
+ goto out;
+ r = 0;
+ break;
+ }
+ case KVM_SET_MP_STATE: {
+ struct kvm_mp_state mp_state;
+
+ r = -EFAULT;
+ if (copy_from_user(&mp_state, argp, sizeof mp_state))
+ goto out;
+ r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
+ if (r)
+ goto out;
+ r = 0;
+ break;
+ }
case KVM_TRANSLATE: {
struct kvm_translation tr;
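
Note: the new KVM_GET_MP_STATE/KVM_SET_MP_STATE ioctls expose the vcpu's multiprocessing state (e.g. runnable vs. halted) to userspace. From the userspace side this is a plain ioctl on the vcpu fd; a sketch (vcpu_fd is a hypothetical, already-created vcpu descriptor; error checks omitted):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void show_mp_state(int vcpu_fd)
{
	struct kvm_mp_state mp;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) == 0)
		printf("mp_state: %u\n", mp.mp_state);
}
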
@@ -1005,7 +1159,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static struct file_operations kvm_vm_fops = {
+static const struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
.compat_ioctl = kvm_vm_ioctl,
@@ -1024,12 +1178,10 @@ static int kvm_dev_ioctl_create_vm(void)
return PTR_ERR(kvm);
r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
if (r) {
- kvm_destroy_vm(kvm);
+ kvm_put_kvm(kvm);
return r;
}
- kvm->filp = file;
-
return fd;
}
@@ -1059,7 +1211,15 @@ static long kvm_dev_ioctl(struct file *filp,
r = -EINVAL;
if (arg)
goto out;
- r = 2 * PAGE_SIZE;
+ r = PAGE_SIZE; /* struct kvm_run */
+#ifdef CONFIG_X86
+ r += PAGE_SIZE; /* pio data page */
+#endif
+ break;
+ case KVM_TRACE_ENABLE:
+ case KVM_TRACE_PAUSE:
+ case KVM_TRACE_DISABLE:
+ r = kvm_trace_ioctl(ioctl, arg);
break;
default:
return kvm_arch_dev_ioctl(filp, ioctl, arg);
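
Note: KVM_GET_VCPU_MMAP_SIZE now reports an arch-dependent size (one page for struct kvm_run, plus the pio data page only on x86), so userspace must query it instead of hard-coding two pages. A sketch (kvm_fd is an open /dev/kvm descriptor, vcpu_fd a vcpu descriptor, both hypothetical; error checks omitted):

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static struct kvm_run *map_vcpu_run(int kvm_fd, int vcpu_fd)
{
	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);

	return mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
}
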
@@ -1232,9 +1392,9 @@ static void kvm_init_debug(void)
{
struct kvm_stats_debugfs_item *p;
- debugfs_dir = debugfs_create_dir("kvm", NULL);
+ kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
for (p = debugfs_entries; p->name; ++p)
- p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
+ p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
(void *)(long)p->offset,
stat_fops[p->kind]);
}
@@ -1245,7 +1405,7 @@ static void kvm_exit_debug(void)
for (p = debugfs_entries; p->name; ++p)
debugfs_remove(p->dentry);
- debugfs_remove(debugfs_dir);
+ debugfs_remove(kvm_debugfs_dir);
}
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
@@ -1272,6 +1432,7 @@ static struct sys_device kvm_sysdev = {
};
struct page *bad_page;
+pfn_t bad_pfn;
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
@@ -1313,6 +1474,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
goto out;
}
+ bad_pfn = page_to_pfn(bad_page);
+
r = kvm_arch_hardware_setup();
if (r < 0)
goto out_free_0;
@@ -1386,6 +1549,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
+ kvm_trace_cleanup();
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);
sysdev_unregister(&kvm_sysdev);
diff --git a/virt/kvm/kvm_trace.c b/virt/kvm/kvm_trace.c
new file mode 100644
index 00000000000..0e495470788
--- /dev/null
+++ b/virt/kvm/kvm_trace.c
@@ -0,0 +1,276 @@
+/*
+ * kvm trace
+ *
+ * It is designed to allow debugging traces of kvm to be generated
+ * on UP/SMP machines. Each trace entry can be timestamped so that
+ * it's possible to reconstruct a chronological record of trace events.
+ * The implementation is modeled on the kernel's blktrace support.
+ *
+ * Copyright (c) 2008 Intel Corporation
+ * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
+ *
+ * Authors: Feng (Eric) Liu <eric.e.liu@intel.com>
+ *
+ * Date: Feb 2008
+ */
+
+#include <linux/module.h>
+#include <linux/relay.h>
+#include <linux/debugfs.h>
+
+#include <linux/kvm_host.h>
+
+#define KVM_TRACE_STATE_RUNNING (1 << 0)
+#define KVM_TRACE_STATE_PAUSE (1 << 1)
+#define KVM_TRACE_STATE_CLEANUP (1 << 2)
+
+struct kvm_trace {
+ int trace_state;
+ struct rchan *rchan;
+ struct dentry *lost_file;
+ atomic_t lost_records;
+};
+static struct kvm_trace *kvm_trace;
+
+struct kvm_trace_probe {
+ const char *name;
+ const char *format;
+ u32 cycle_in;
+ marker_probe_func *probe_func;
+};
+
+static inline int calc_rec_size(int cycle, int extra)
+{
+ int rec_size = KVM_TRC_HEAD_SIZE;
+
+ rec_size += extra;
+ return cycle ? rec_size + KVM_TRC_CYCLE_SIZE : rec_size;
+}
+
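+/*
+ * Marker probe: marshals one trace record out of the marker's va_list.
+ * Record layout: fixed header (event, pid, vcpu_id), an optional 64-bit
+ * cycle counter, then up to KVM_TRC_EXTRA_MAX extra u32 payload words.
+ */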
+static void kvm_add_trace(void *probe_private, void *call_data,
+ const char *format, va_list *args)
+{
+ struct kvm_trace_probe *p = probe_private;
+ struct kvm_trace *kt = kvm_trace;
+ struct kvm_trace_rec rec;
+ struct kvm_vcpu *vcpu;
+ int i, extra, size;
+
+ if (unlikely(kt->trace_state != KVM_TRACE_STATE_RUNNING))
+ return;
+
+ rec.event = va_arg(*args, u32);
+ vcpu = va_arg(*args, struct kvm_vcpu *);
+ rec.pid = current->tgid;
+ rec.vcpu_id = vcpu->vcpu_id;
+
+ extra = va_arg(*args, u32);
+ WARN_ON(extra > KVM_TRC_EXTRA_MAX);
+ extra = min_t(u32, extra, KVM_TRC_EXTRA_MAX);
+ rec.extra_u32 = extra;
+
+ rec.cycle_in = p->cycle_in;
+
+ if (rec.cycle_in) {
+ u64 cycle = 0;
+
+ cycle = get_cycles();
+ rec.u.cycle.cycle_lo = (u32)cycle;
+ rec.u.cycle.cycle_hi = (u32)(cycle >> 32);
+
+ for (i = 0; i < rec.extra_u32; i++)
+ rec.u.cycle.extra_u32[i] = va_arg(*args, u32);
+ } else {
+ for (i = 0; i < rec.extra_u32; i++)
+ rec.u.nocycle.extra_u32[i] = va_arg(*args, u32);
+ }
+
+ size = calc_rec_size(rec.cycle_in, rec.extra_u32 * sizeof(u32));
+ relay_write(kt->rchan, &rec, size);
+}
+
+static struct kvm_trace_probe kvm_trace_probes[] = {
+ { "kvm_trace_entryexit", "%u %p %u %u %u %u %u %u", 1, kvm_add_trace },
+ { "kvm_trace_handler", "%u %p %u %u %u %u %u %u", 0, kvm_add_trace },
+};
+
+static int lost_records_get(void *data, u64 *val)
+{
+ struct kvm_trace *kt = data;
+
+ *val = atomic_read(&kt->lost_records);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kvm_trace_lost_ops, lost_records_get, NULL, "%llu\n");
+
+/*
+ * The relay channel is used in "no-overwrite" mode: it keeps track of how
+ * many times we encountered a full subbuffer, so user space can tell how
+ * many records were lost.
+ */
+static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf, size_t prev_padding)
+{
+ struct kvm_trace *kt;
+
+ if (!relay_buf_full(buf))
+ return 1;
+
+ kt = buf->chan->private_data;
+ atomic_inc(&kt->lost_records);
+
+ return 0;
+}
+
+static struct dentry *kvm_create_buf_file_callback(const char *filename,
+ struct dentry *parent,
+ int mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+static int kvm_remove_buf_file_callback(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+static struct rchan_callbacks kvm_relay_callbacks = {
+ .subbuf_start = kvm_subbuf_start_callback,
+ .create_buf_file = kvm_create_buf_file_callback,
+ .remove_buf_file = kvm_remove_buf_file_callback,
+};
+
+static int do_kvm_trace_enable(struct kvm_user_trace_setup *kuts)
+{
+ struct kvm_trace *kt;
+ int i, r = -ENOMEM;
+
+ if (!kuts->buf_size || !kuts->buf_nr)
+ return -EINVAL;
+
+ kt = kzalloc(sizeof(*kt), GFP_KERNEL);
+ if (!kt)
+ goto err;
+
+ r = -EIO;
+ atomic_set(&kt->lost_records, 0);
+ kt->lost_file = debugfs_create_file("lost_records", 0444, kvm_debugfs_dir,
+ kt, &kvm_trace_lost_ops);
+ if (!kt->lost_file)
+ goto err;
+
+ kt->rchan = relay_open("trace", kvm_debugfs_dir, kuts->buf_size,
+ kuts->buf_nr, &kvm_relay_callbacks, kt);
+ if (!kt->rchan)
+ goto err;
+
+ kvm_trace = kt;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
+ struct kvm_trace_probe *p = &kvm_trace_probes[i];
+
+ r = marker_probe_register(p->name, p->format, p->probe_func, p);
+ if (r)
+ printk(KERN_INFO "Unable to register probe %s\n",
+ p->name);
+ }
+
+ kvm_trace->trace_state = KVM_TRACE_STATE_RUNNING;
+
+ return 0;
+err:
+ if (kt) {
+ if (kt->lost_file)
+ debugfs_remove(kt->lost_file);
+ if (kt->rchan)
+ relay_close(kt->rchan);
+ kfree(kt);
+ }
+ return r;
+}
+
+static int kvm_trace_enable(char __user *arg)
+{
+ struct kvm_user_trace_setup kuts;
+ int ret;
+
+ ret = copy_from_user(&kuts, arg, sizeof(kuts));
+ if (ret)
+ return -EFAULT;
+
+ ret = do_kvm_trace_enable(&kuts);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int kvm_trace_pause(void)
+{
+ struct kvm_trace *kt = kvm_trace;
+ int r = -EINVAL;
+
+ if (kt == NULL)
+ return r;
+
+ if (kt->trace_state == KVM_TRACE_STATE_RUNNING) {
+ kt->trace_state = KVM_TRACE_STATE_PAUSE;
+ relay_flush(kt->rchan);
+ r = 0;
+ }
+
+ return r;
+}
+
+void kvm_trace_cleanup(void)
+{
+ struct kvm_trace *kt = kvm_trace;
+ int i;
+
+ if (kt == NULL)
+ return;
+
+ if (kt->trace_state == KVM_TRACE_STATE_RUNNING ||
+ kt->trace_state == KVM_TRACE_STATE_PAUSE) {
+
+ kt->trace_state = KVM_TRACE_STATE_CLEANUP;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
+ struct kvm_trace_probe *p = &kvm_trace_probes[i];
+ marker_probe_unregister(p->name, p->probe_func, p);
+ }
+
+ relay_close(kt->rchan);
+ debugfs_remove(kt->lost_file);
+ kfree(kt);
+ }
+}
+
+int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ long r = -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ switch (ioctl) {
+ case KVM_TRACE_ENABLE:
+ r = kvm_trace_enable(argp);
+ break;
+ case KVM_TRACE_PAUSE:
+ r = kvm_trace_pause();
+ break;
+ case KVM_TRACE_DISABLE:
+ r = 0;
+ kvm_trace_cleanup();
+ break;
+ }
+
+ return r;
+}
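
Note: from userspace, tracing is driven through the new KVM_TRACE_* ioctls on the /dev/kvm fd and read back through the relay files relay_open() creates under the kvm debugfs directory. A sketch with made-up buffer sizes (kvm_fd hypothetical; struct kvm_user_trace_setup as declared in this tree's linux/kvm.h):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static void trace_session(int kvm_fd)
{
	struct kvm_user_trace_setup kuts = {
		.buf_size = 4096,	/* bytes per relay sub-buffer (assumption) */
		.buf_nr   = 8,		/* number of sub-buffers (assumption) */
	};

	ioctl(kvm_fd, KVM_TRACE_ENABLE, &kuts);	/* needs CAP_SYS_ADMIN */
	/* ... run guests; consume records from debugfs kvm/trace* ... */
	ioctl(kvm_fd, KVM_TRACE_DISABLE, 0);
}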