-rw-r--r--CREDITS17
-rw-r--r--Documentation/ABI/testing/pstore6
-rw-r--r--Documentation/ABI/testing/sysfs-platform-ideapad-laptop17
-rw-r--r--Documentation/CodingStyle23
-rw-r--r--Documentation/acpi/apei/einj.txt11
-rw-r--r--Documentation/device-mapper/dm-crypt.txt21
-rw-r--r--Documentation/device-mapper/dm-flakey.txt48
-rw-r--r--Documentation/device-mapper/dm-raid.txt138
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio_keys.txt2
-rw-r--r--Documentation/devicetree/bindings/input/fsl-mma8450.txt11
-rw-r--r--Documentation/dmaengine.txt234
-rw-r--r--Documentation/fault-injection/fault-injection.txt3
-rw-r--r--Documentation/feature-removal-schedule.txt20
-rw-r--r--Documentation/frv/booting.txt13
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kernel-parameters.txt11
-rw-r--r--Documentation/m68k/kernel-options.txt14
-rw-r--r--Documentation/networking/bonding.txt2
-rw-r--r--Documentation/power/runtime_pm.txt10
-rw-r--r--MAINTAINERS24
-rw-r--r--Makefile4
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/arm/kernel/armksyms.c3
-rw-r--r--arch/arm/kernel/process.c4
-rw-r--r--arch/arm/lib/Makefile2
-rw-r--r--arch/arm/lib/sha1.S211
-rw-r--r--arch/arm/mach-msm/Kconfig4
-rw-r--r--arch/arm/mach-msm/Makefile8
-rw-r--r--arch/arm/mach-msm/gpio.c376
-rw-r--r--arch/arm/mach-msm/gpio_hw.h278
-rw-r--r--arch/arm/mach-msm/gpiomux.h17
-rw-r--r--arch/arm/mach-msm/include/mach/msm_gpiomux.h38
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x00.h10
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x30.h10
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8x50.h10
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap.h2
-rw-r--r--arch/arm/mach-msm/io.c12
-rw-r--r--arch/arm/mach-omap2/display.c26
-rw-r--r--arch/arm/mach-shmobile/clock-sh7367.c3
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c4
-rw-r--r--arch/arm/mach-shmobile/clock-sh7377.c3
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c4
-rw-r--r--arch/avr32/Kconfig1
-rw-r--r--arch/cris/arch-v10/drivers/sync_serial.c6
-rw-r--r--arch/cris/arch-v10/kernel/irq.c3
-rw-r--r--arch/cris/include/asm/thread_info.h6
-rw-r--r--arch/frv/Kconfig1
-rw-r--r--arch/ia64/Kconfig5
-rw-r--r--arch/ia64/include/asm/gpio.h55
-rw-r--r--arch/ia64/kernel/efi.c2
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/Kconfig.mmu6
-rw-r--r--arch/m68k/amiga/chipram.c136
-rw-r--r--arch/m68k/atari/stram.c354
-rw-r--r--arch/m68k/include/asm/atari_stram.h3
-rw-r--r--arch/m68k/include/asm/atarihw.h4
-rw-r--r--arch/m68k/kernel/setup_mm.c2
-rw-r--r--arch/m68k/math-emu/fp_log.c3
-rw-r--r--arch/m68k/math-emu/multi_arith.h530
-rw-r--r--arch/m68k/mm/init_mm.c5
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/atomic.h4
-rw-r--r--arch/parisc/include/asm/futex.h66
-rw-r--r--arch/parisc/include/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/s390/Kconfig9
-rw-r--r--arch/s390/include/asm/ipl.h1
-rw-r--r--arch/s390/include/asm/lowcore.h11
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/system.h1
-rw-r--r--arch/s390/kernel/asm-offsets.c10
-rw-r--r--arch/s390/kernel/base.S36
-rw-r--r--arch/s390/kernel/compat_signal.c43
-rw-r--r--arch/s390/kernel/entry.S28
-rw-r--r--arch/s390/kernel/entry64.S20
-rw-r--r--arch/s390/kernel/ipl.c45
-rw-r--r--arch/s390/kernel/reipl64.S80
-rw-r--r--arch/s390/kernel/setup.c25
-rw-r--r--arch/s390/kernel/signal.c61
-rw-r--r--arch/s390/kernel/smp.c24
-rw-r--r--arch/s390/mm/maccess.c16
-rw-r--r--arch/s390/mm/pgtable.c1
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/Makefile1
-rw-r--r--arch/sh/boards/board-apsh4a3a.c2
-rw-r--r--arch/sh/boards/board-apsh4ad0a.c2
-rw-r--r--arch/sh/boards/board-sh7785lcr.c2
-rw-r--r--arch/sh/boards/board-urquell.c2
-rw-r--r--arch/sh/boards/mach-highlander/setup.c2
-rw-r--r--arch/sh/boards/mach-sdk7786/setup.c2
-rw-r--r--arch/sh/include/cpu-sh3/cpu/serial.h10
-rw-r--r--arch/sh/include/cpu-sh4a/cpu/serial.h7
-rw-r--r--arch/sh/kernel/cpu/clock-cpg.c2
-rw-r--r--arch/sh/kernel/cpu/sh3/Makefile18
-rw-r--r--arch/sh/kernel/cpu/sh3/serial-sh770x.c33
-rw-r--r--arch/sh/kernel/cpu/sh3/serial-sh7710.c20
-rw-r--r--arch/sh/kernel/cpu/sh3/serial-sh7720.c37
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7705.c5
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh770x.c9
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7720.c5
-rw-r--r--arch/sh/kernel/cpu/sh4/clock-sh4-202.c2
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c4
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7760.c4
-rw-r--r--arch/sh/kernel/cpu/sh4a/Makefile2
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7343.c33
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7366.c25
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7722.c46
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7723.c4
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7724.c84
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7757.c36
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7763.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7780.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7785.c79
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7786.c115
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-shx3.c67
-rw-r--r--arch/sh/kernel/cpu/sh4a/serial-sh7722.c23
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7366.c1
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c7
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7723.c9
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c9
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7763.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7780.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7785.c8
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c8
-rw-r--r--arch/sh/kernel/idle.c6
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/Kbuild5
-rw-r--r--arch/sparc/include/asm/bitops_64.h49
-rw-r--r--arch/sparc/include/asm/div64.h1
-rw-r--r--arch/sparc/include/asm/elf_64.h65
-rw-r--r--arch/sparc/include/asm/hypervisor.h13
-rw-r--r--arch/sparc/include/asm/irq_regs.h1
-rw-r--r--arch/sparc/include/asm/local.h6
-rw-r--r--arch/sparc/include/asm/local64.h1
-rw-r--r--arch/sparc/include/asm/tsb.h51
-rw-r--r--arch/sparc/kernel/cpu.c1
-rw-r--r--arch/sparc/kernel/ds.c30
-rw-r--r--arch/sparc/kernel/entry.h14
-rw-r--r--arch/sparc/kernel/head_64.S2
-rw-r--r--arch/sparc/kernel/hvapi.c6
-rw-r--r--arch/sparc/kernel/hvcalls.S7
-rw-r--r--arch/sparc/kernel/ioport.c32
-rw-r--r--arch/sparc/kernel/kernel.h15
-rw-r--r--arch/sparc/kernel/ktlb.S24
-rw-r--r--arch/sparc/kernel/mdesc.c30
-rw-r--r--arch/sparc/kernel/setup_64.c186
-rw-r--r--arch/sparc/kernel/sparc_ksyms_64.c11
-rw-r--r--arch/sparc/kernel/sstate.c9
-rw-r--r--arch/sparc/kernel/unaligned_64.c15
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S21
-rw-r--r--arch/sparc/lib/Makefile4
-rw-r--r--arch/sparc/lib/NG2page.S61
-rw-r--r--arch/sparc/lib/NGpage.S114
-rw-r--r--arch/sparc/lib/atomic32.c2
-rw-r--r--arch/sparc/lib/ffs.S84
-rw-r--r--arch/sparc/lib/hweight.S51
-rw-r--r--arch/sparc/mm/init_64.c42
-rw-r--r--arch/tile/Kconfig1
-rw-r--r--arch/tile/include/asm/Kbuild38
-rw-r--r--arch/tile/include/asm/bug.h1
-rw-r--r--arch/tile/include/asm/bugs.h1
-rw-r--r--arch/tile/include/asm/cputime.h1
-rw-r--r--arch/tile/include/asm/device.h1
-rw-r--r--arch/tile/include/asm/div64.h1
-rw-r--r--arch/tile/include/asm/emergency-restart.h1
-rw-r--r--arch/tile/include/asm/errno.h1
-rw-r--r--arch/tile/include/asm/fb.h1
-rw-r--r--arch/tile/include/asm/fcntl.h1
-rw-r--r--arch/tile/include/asm/fixmap.h6
-rw-r--r--arch/tile/include/asm/ioctl.h1
-rw-r--r--arch/tile/include/asm/ioctls.h1
-rw-r--r--arch/tile/include/asm/ipc.h1
-rw-r--r--arch/tile/include/asm/ipcbuf.h1
-rw-r--r--arch/tile/include/asm/irq_regs.h1
-rw-r--r--arch/tile/include/asm/kdebug.h1
-rw-r--r--arch/tile/include/asm/local.h1
-rw-r--r--arch/tile/include/asm/module.h1
-rw-r--r--arch/tile/include/asm/msgbuf.h1
-rw-r--r--arch/tile/include/asm/mutex.h1
-rw-r--r--arch/tile/include/asm/param.h1
-rw-r--r--arch/tile/include/asm/parport.h1
-rw-r--r--arch/tile/include/asm/poll.h1
-rw-r--r--arch/tile/include/asm/posix_types.h1
-rw-r--r--arch/tile/include/asm/resource.h1
-rw-r--r--arch/tile/include/asm/scatterlist.h1
-rw-r--r--arch/tile/include/asm/sembuf.h1
-rw-r--r--arch/tile/include/asm/serial.h1
-rw-r--r--arch/tile/include/asm/shmbuf.h1
-rw-r--r--arch/tile/include/asm/shmparam.h1
-rw-r--r--arch/tile/include/asm/socket.h1
-rw-r--r--arch/tile/include/asm/sockios.h1
-rw-r--r--arch/tile/include/asm/statfs.h1
-rw-r--r--arch/tile/include/asm/termbits.h1
-rw-r--r--arch/tile/include/asm/termios.h1
-rw-r--r--arch/tile/include/asm/types.h1
-rw-r--r--arch/tile/include/asm/ucontext.h1
-rw-r--r--arch/tile/include/asm/xor.h1
-rw-r--r--arch/tile/include/hv/drv_srom_intf.h41
-rw-r--r--arch/tile/kernel/time.c5
-rw-r--r--arch/tile/mm/init.c3
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/io.h3
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/kernel/acpi/cstate.c23
-rw-r--r--arch/x86/kernel/process.c23
-rw-r--r--arch/x86/kernel/process_32.c4
-rw-r--r--arch/x86/kernel/process_64.c4
-rw-r--r--arch/x86/platform/mrst/Makefile1
-rw-r--r--arch/x86/platform/mrst/pmu.c817
-rw-r--r--arch/x86/platform/mrst/pmu.h234
-rw-r--r--arch/x86/xen/Makefile2
-rw-r--r--arch/x86/xen/setup.c13
-rw-r--r--arch/x86/xen/trace.c1
-rw-r--r--block/blk-core.c6
-rw-r--r--block/blk-timeout.c5
-rw-r--r--crypto/md5.c92
-rw-r--r--drivers/acpi/acpica/acglobal.h6
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/acpredef.h1
-rw-r--r--drivers/acpi/acpica/nspredef.c19
-rw-r--r--drivers/acpi/acpica/nsrepair2.c15
-rw-r--r--drivers/acpi/acpica/tbinstal.c27
-rw-r--r--drivers/acpi/apei/Kconfig11
-rw-r--r--drivers/acpi/apei/apei-base.c35
-rw-r--r--drivers/acpi/apei/apei-internal.h15
-rw-r--r--drivers/acpi/apei/einj.c43
-rw-r--r--drivers/acpi/apei/erst-dbg.c6
-rw-r--r--drivers/acpi/apei/erst.c32
-rw-r--r--drivers/acpi/apei/ghes.c431
-rw-r--r--drivers/acpi/apei/hest.c17
-rw-r--r--drivers/acpi/battery.c86
-rw-r--r--drivers/acpi/bus.c14
-rw-r--r--drivers/acpi/dock.c4
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/osl.c25
-rw-r--r--drivers/acpi/pci_irq.c58
-rw-r--r--drivers/acpi/pci_root.c3
-rw-r--r--drivers/acpi/processor_thermal.c2
-rw-r--r--drivers/acpi/sbs.c13
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/acpi/video.c2
-rw-r--r--drivers/ata/libata-acpi.c4
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/power/domain.c3
-rw-r--r--drivers/base/power/runtime.c10
-rw-r--r--drivers/char/Kconfig11
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/ramoops.c8
-rw-r--r--drivers/char/random.c349
-rw-r--r--drivers/char/tile-srom.c481
-rw-r--r--drivers/char/tpm/tpm_tis.c7
-rw-r--r--drivers/connector/cn_proc.c8
-rw-r--r--drivers/cpuidle/cpuidle.c50
-rw-r--r--drivers/cpuidle/cpuidle.h1
-rw-r--r--drivers/cpuidle/driver.c3
-rw-r--r--drivers/cpuidle/governor.c3
-rw-r--r--drivers/dma/TODO1
-rw-r--r--drivers/dma/amba-pl08x.c246
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/coh901318.c19
-rw-r--r--drivers/dma/dmaengine.c8
-rw-r--r--drivers/dma/ep93xx_dma.c2
-rw-r--r--drivers/dma/imx-sdma.c4
-rw-r--r--drivers/dma/intel_mid_dma.c2
-rw-r--r--drivers/dma/ioat/dma_v3.c8
-rw-r--r--drivers/dma/ioat/pci.c11
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/mv_xor.c3
-rw-r--r--drivers/dma/mxs-dma.c13
-rw-r--r--drivers/dma/pch_dma.c127
-rw-r--r--drivers/dma/pl330.c64
-rw-r--r--drivers/dma/shdma.c88
-rw-r--r--drivers/dma/shdma.h4
-rw-r--r--drivers/dma/ste_dma40.c270
-rw-r--r--drivers/dma/ste_dma40_ll.h3
-rw-r--r--drivers/eisa/pci_eisa.c4
-rw-r--r--drivers/firmware/efivars.c243
-rw-r--r--drivers/gpio/Kconfig22
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/gpio-ab8500.c2
-rw-r--r--drivers/gpio/gpio-msm-v1.c636
-rw-r--r--drivers/gpio/gpio-msm-v2.c (renamed from arch/arm/mach-msm/gpio-v2.c)4
-rw-r--r--drivers/gpio/gpio-tps65912.c156
-rw-r--r--drivers/gpu/drm/drm_debugfs.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c33
-rw-r--r--drivers/gpu/drm/drm_irq.c26
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c189
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c3
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h36
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c138
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c111
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h23
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c158
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c3
-rw-r--r--drivers/gpu/drm/radeon/Makefile1
-rw-r--r--drivers/gpu/drm/radeon/atom.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c54
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h6
-rw-r--r--drivers/ide/cy82c693.c2
-rw-r--r--drivers/ide/ide_platform.c6
-rw-r--r--drivers/input/keyboard/gpio_keys.c2
-rw-r--r--drivers/input/keyboard/lm8323.c9
-rw-r--r--drivers/input/keyboard/tegra-kbc.c5
-rw-r--r--drivers/input/misc/kxtj9.c1
-rw-r--r--drivers/input/misc/mma8450.c8
-rw-r--r--drivers/input/mouse/hgpk.c1
-rw-r--r--drivers/input/serio/xilinx_ps2.c2
-rw-r--r--drivers/input/touchscreen/ad7879.c4
-rw-r--r--drivers/md/Kconfig5
-rw-r--r--drivers/md/dm-crypt.c62
-rw-r--r--drivers/md/dm-flakey.c270
-rw-r--r--drivers/md/dm-io.c29
-rw-r--r--drivers/md/dm-ioctl.c89
-rw-r--r--drivers/md/dm-kcopyd.c42
-rw-r--r--drivers/md/dm-log-userspace-base.c3
-rw-r--r--drivers/md/dm-log.c32
-rw-r--r--drivers/md/dm-mpath.c147
-rw-r--r--drivers/md/dm-raid.c621
-rw-r--r--drivers/md/dm-snap-persistent.c80
-rw-r--r--drivers/md/dm-snap.c84
-rw-r--r--drivers/md/dm-table.c155
-rw-r--r--drivers/md/dm.c75
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/mfd/Kconfig53
-rw-r--r--drivers/mfd/Makefile8
-rw-r--r--drivers/mfd/aat2870-core.c535
-rw-r--r--drivers/mfd/ab3550-core.c41
-rw-r--r--drivers/mfd/ab8500-core.c231
-rw-r--r--drivers/mfd/ab8500-debugfs.c41
-rw-r--r--drivers/mfd/jz4740-adc.c90
-rw-r--r--drivers/mfd/lpc_sch.c49
-rw-r--r--drivers/mfd/max8997-irq.c2
-rw-r--r--drivers/mfd/max8998.c2
-rw-r--r--drivers/mfd/omap-usb-host.c4
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/stmpe.h1
-rw-r--r--drivers/mfd/tps65910.c13
-rw-r--r--drivers/mfd/tps65911-comparator.c2
-rw-r--r--drivers/mfd/tps65912-core.c177
-rw-r--r--drivers/mfd/tps65912-i2c.c139
-rw-r--r--drivers/mfd/tps65912-irq.c224
-rw-r--r--drivers/mfd/tps65912-spi.c142
-rw-r--r--drivers/mfd/twl-core.c2
-rw-r--r--drivers/mfd/twl4030-madc.c8
-rw-r--r--drivers/mfd/twl6030-pwm.c2
-rw-r--r--drivers/mfd/wm831x-auxadc.c299
-rw-r--r--drivers/mfd/wm831x-core.c259
-rw-r--r--drivers/mfd/wm831x-irq.c77
-rw-r--r--drivers/mfd/wm8350-irq.c18
-rw-r--r--drivers/mfd/wm8994-core.c33
-rw-r--r--drivers/mfd/wm8994-irq.c12
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c10
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c218
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h3
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h6
-rw-r--r--drivers/net/e1000/e1000_ethtool.c6
-rw-r--r--drivers/net/e1000/e1000_hw.c3
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/ethtool.c11
-rw-r--r--drivers/net/e1000e/ich8lan.c7
-rw-r--r--drivers/net/e1000e/lib.c1
-rw-r--r--drivers/net/e1000e/netdev.c2
-rw-r--r--drivers/net/e1000e/phy.c2
-rw-r--r--drivers/net/igb/e1000_nvm.c1
-rw-r--r--drivers/net/igb/igb_ethtool.c5
-rw-r--r--drivers/net/igb/igb_main.c4
-rw-r--r--drivers/net/igbvf/netdev.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c18
-rw-r--r--drivers/net/ixgb/ixgb_ee.c9
-rw-r--r--drivers/net/ixgb/ixgb_hw.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c5
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c3
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c3
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c1
-rw-r--r--drivers/net/macb.c3
-rw-r--r--drivers/net/mlx4/en_port.c2
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mlx4/port.c9
-rw-r--r--drivers/net/niu.c4
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/r8169.c28
-rw-r--r--drivers/net/sis190.c12
-rw-r--r--drivers/net/usb/cdc_ncm.c156
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c27
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c6
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c3
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c5
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c20
-rw-r--r--drivers/of/address.c2
-rw-r--r--drivers/of/base.c7
-rw-r--r--drivers/of/gpio.c5
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c2
-rw-r--r--drivers/pcmcia/pxa2xx_balloon3.c10
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c11
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c11
-rw-r--r--drivers/pcmcia/pxa2xx_colibri.c11
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c10
-rw-r--r--drivers/pcmcia/pxa2xx_palmld.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c11
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c34
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c10
-rw-r--r--drivers/pcmcia/soc_common.c7
-rw-r--r--drivers/platform/x86/Kconfig8
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c40
-rw-r--r--drivers/platform/x86/acerhdf.c13
-rw-r--r--drivers/platform/x86/asus-laptop.c9
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c27
-rw-r--r--drivers/platform/x86/asus-wmi.c239
-rw-r--r--drivers/platform/x86/asus-wmi.h7
-rw-r--r--drivers/platform/x86/dell-laptop.c1
-rw-r--r--drivers/platform/x86/dell-wmi.c10
-rw-r--r--drivers/platform/x86/eeepc-wmi.c27
-rw-r--r--drivers/platform/x86/ideapad-laptop.c195
-rw-r--r--drivers/platform/x86/intel_ips.c4
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c26
-rw-r--r--drivers/platform/x86/intel_rar_register.c4
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c10
-rw-r--r--drivers/platform/x86/msi-wmi.c1
-rw-r--r--drivers/platform/x86/samsung-laptop.c20
-rw-r--r--drivers/platform/x86/samsung-q10.c196
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c11
-rw-r--r--drivers/power/Kconfig14
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/apm_power.c8
-rw-r--r--drivers/power/bq20z75.c103
-rw-r--r--drivers/power/gpio-charger.c2
-rw-r--r--drivers/power/max17042_battery.c175
-rw-r--r--drivers/power/max8903_charger.c16
-rw-r--r--drivers/power/max8997_charger.c206
-rw-r--r--drivers/power/max8998_charger.c218
-rw-r--r--drivers/power/s3c_adc_battery.c2
-rw-r--r--drivers/power/twl4030_charger.c10
-rw-r--r--drivers/power/wm831x_backup.c12
-rw-r--r--drivers/power/wm831x_power.c26
-rw-r--r--drivers/regulator/Kconfig13
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/aat2870-regulator.c232
-rw-r--r--drivers/regulator/core.c190
-rw-r--r--drivers/regulator/dummy.c32
-rw-r--r--drivers/regulator/tps65910-regulator.c63
-rw-r--r--drivers/regulator/tps65912-regulator.c800
-rw-r--r--drivers/regulator/twl-regulator.c66
-rw-r--r--drivers/regulator/wm831x-dcdc.c126
-rw-r--r--drivers/regulator/wm831x-ldo.c25
-rw-r--r--drivers/regulator/wm8994-regulator.c4
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/s390/block/dasd.c9
-rw-r--r--drivers/s390/block/dasd_eckd.c9
-rw-r--r--drivers/s390/block/dasd_proc.c4
-rw-r--r--drivers/s390/char/sclp_async.c9
-rw-r--r--drivers/s390/cio/qdio.h2
-rw-r--r--drivers/s390/cio/qdio_debug.c12
-rw-r--r--drivers/s390/cio/qdio_main.c21
-rw-r--r--drivers/sh/clk/core.c29
-rw-r--r--drivers/spi/spi-pl022.c11
-rw-r--r--drivers/target/iscsi/Kconfig1
-rw-r--r--drivers/target/iscsi/iscsi_target.c15
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/target_core_transport.c33
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h5
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c1
-rw-r--r--drivers/target/tcm_fc/tfc_io.c121
-rw-r--r--drivers/thermal/Kconfig8
-rw-r--r--drivers/thermal/thermal_sys.c142
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/imx.c13
-rw-r--r--drivers/tty/serial/sh-sci.c757
-rw-r--r--drivers/tty/serial/sh-sci.h434
-rw-r--r--drivers/video/backlight/Kconfig7
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/aat2870_bl.c246
-rw-r--r--drivers/video/omap2/displays/panel-taal.c55
-rw-r--r--drivers/video/omap2/dss/Kconfig12
-rw-r--r--drivers/video/omap2/dss/core.c21
-rw-r--r--drivers/video/omap2/dss/dispc.c562
-rw-r--r--drivers/video/omap2/dss/display.c57
-rw-r--r--drivers/video/omap2/dss/dpi.c73
-rw-r--r--drivers/video/omap2/dss/dsi.c296
-rw-r--r--drivers/video/omap2/dss/dss.c583
-rw-r--r--drivers/video/omap2/dss/dss.h54
-rw-r--r--drivers/video/omap2/dss/dss_features.c36
-rw-r--r--drivers/video/omap2/dss/dss_features.h7
-rw-r--r--drivers/video/omap2/dss/hdmi.c162
-rw-r--r--drivers/video/omap2/dss/manager.c351
-rw-r--r--drivers/video/omap2/dss/overlay.c27
-rw-r--r--drivers/video/omap2/dss/rfbi.c114
-rw-r--r--drivers/video/omap2/dss/sdi.c40
-rw-r--r--drivers/video/omap2/dss/venc.c183
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c72
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c166
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c34
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h37
-rw-r--r--drivers/video/savage/savagefb.h2
-rw-r--r--drivers/watchdog/Kconfig3
-rw-r--r--drivers/watchdog/nv_tco.c8
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--fs/9p/acl.c6
-rw-r--r--fs/9p/acl.h4
-rw-r--r--fs/9p/vfs_inode_dotl.c6
-rw-r--r--fs/Kconfig15
-rw-r--r--fs/block_dev.c5
-rw-r--r--fs/btrfs/Makefile4
-rw-r--r--fs/btrfs/acl.c27
-rw-r--r--fs/btrfs/compression.c14
-rw-r--r--fs/btrfs/ctree.h30
-rw-r--r--fs/btrfs/dir-item.c30
-rw-r--r--fs/btrfs/extent-tree.c45
-rw-r--r--fs/btrfs/extent_io.c139
-rw-r--r--fs/btrfs/extent_io.h20
-rw-r--r--fs/btrfs/extent_map.c155
-rw-r--r--fs/btrfs/file-item.c7
-rw-r--r--fs/btrfs/file.c21
-rw-r--r--fs/btrfs/inode.c145
-rw-r--r--fs/btrfs/ioctl.c3
-rw-r--r--fs/btrfs/ref-cache.c68
-rw-r--r--fs/btrfs/ref-cache.h52
-rw-r--r--fs/btrfs/root-tree.c5
-rw-r--r--fs/btrfs/transaction.c65
-rw-r--r--fs/btrfs/tree-log.c12
-rw-r--r--fs/btrfs/volumes.c12
-rw-r--r--fs/cifs/cifs_dfs_ref.c5
-rw-r--r--fs/cifs/cifsencrypt.c16
-rw-r--r--fs/cifs/cifsfs.c22
-rw-r--r--fs/cifs/cifsfs.h4
-rw-r--r--fs/cifs/cifsglob.h2
-rw-r--r--fs/cifs/cifssmb.c6
-rw-r--r--fs/cifs/connect.c659
-rw-r--r--fs/cifs/dns_resolve.c4
-rw-r--r--fs/cifs/file.c27
-rw-r--r--fs/cifs/inode.c14
-rw-r--r--fs/cifs/misc.c11
-rw-r--r--fs/cifs/transport.c2
-rw-r--r--fs/dcache.c72
-rw-r--r--fs/exofs/Kbuild5
-rw-r--r--fs/exofs/Kconfig4
-rw-r--r--fs/exofs/exofs.h159
-rw-r--r--fs/exofs/inode.c152
-rw-r--r--fs/exofs/ore.c (renamed from fs/exofs/ios.c)370
-rw-r--r--fs/exofs/pnfs.h45
-rw-r--r--fs/exofs/super.c251
-rw-r--r--fs/ext2/acl.c8
-rw-r--r--fs/ext3/acl.c9
-rw-r--r--fs/ext4/Makefile2
-rw-r--r--fs/ext4/acl.c9
-rw-r--r--fs/ext4/balloc.c48
-rw-r--r--fs/ext4/block_validity.c21
-rw-r--r--fs/ext4/ext4.h55
-rw-r--r--fs/ext4/extents.c129
-rw-r--r--fs/ext4/fsync.c26
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/indirect.c1482
-rw-r--r--fs/ext4/inode.c1596
-rw-r--r--fs/ext4/ioctl.c12
-rw-r--r--fs/ext4/mballoc.c230
-rw-r--r--fs/ext4/mballoc.h1
-rw-r--r--fs/ext4/namei.c21
-rw-r--r--fs/ext4/page-io.c6
-rw-r--r--fs/ext4/resize.c199
-rw-r--r--fs/ext4/super.c88
-rw-r--r--fs/ext4/truncate.h43
-rw-r--r--fs/fs-writeback.c7
-rw-r--r--fs/generic_acl.c13
-rw-r--r--fs/gfs2/acl.c6
-rw-r--r--fs/hppfs/hppfs.c1
-rw-r--r--fs/inode.c14
-rw-r--r--fs/jbd2/checkpoint.c5
-rw-r--r--fs/jbd2/journal.c67
-rw-r--r--fs/jffs2/acl.c4
-rw-r--r--fs/jffs2/acl.h2
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jfs/acl.c4
-rw-r--r--fs/jfs/xattr.c4
-rw-r--r--fs/namei.c117
-rw-r--r--fs/nfs/Kconfig8
-rw-r--r--fs/nfs/Makefile1
-rw-r--r--fs/nfs/blocklayout/Makefile5
-rw-r--r--fs/nfs/blocklayout/blocklayout.c1019
-rw-r--r--fs/nfs/blocklayout/blocklayout.h207
-rw-r--r--fs/nfs/blocklayout/blocklayoutdev.c410
-rw-r--r--fs/nfs/blocklayout/blocklayoutdm.c111
-rw-r--r--fs/nfs/blocklayout/extents.c935
-rw-r--r--fs/nfs/client.c11
-rw-r--r--fs/nfs/dir.c57
-rw-r--r--fs/nfs/nfs3acl.c2
-rw-r--r--fs/nfs/nfs3proc.c6
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4filelayout.c2
-rw-r--r--fs/nfs/nfs4proc.c62
-rw-r--r--fs/nfs/nfs4xdr.c233
-rw-r--r--fs/nfs/pnfs.c86
-rw-r--r--fs/nfs/pnfs.h28
-rw-r--r--fs/ocfs2/acl.c4
-rw-r--r--fs/posix_acl.c16
-rw-r--r--fs/proc/base.c12
-rw-r--r--fs/pstore/inode.c12
-rw-r--r--fs/pstore/internal.h2
-rw-r--r--fs/pstore/platform.c30
-rw-r--r--fs/reiserfs/xattr_acl.c10
-rw-r--r--fs/stack.c5
-rw-r--r--fs/stat.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_acl.c6
-rw-r--r--include/acpi/acpi_drivers.h2
-rw-r--r--include/acpi/acpixf.h3
-rw-r--r--include/acpi/apei.h5
-rw-r--r--include/acpi/processor.h2
-rw-r--r--include/drm/drm_crtc.h3
-rw-r--r--include/drm/i915_drm.h2
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/amba/pl08x.h9
-rw-r--r--include/linux/bitmap.h1
-rw-r--r--include/linux/cpuidle.h4
-rw-r--r--include/linux/cred.h11
-rw-r--r--include/linux/cryptohash.h7
-rw-r--r--include/linux/dcache.h30
-rw-r--r--include/linux/device-mapper.h43
-rw-r--r--include/linux/dm-ioctl.h4
-rw-r--r--include/linux/dm-kcopyd.h15
-rw-r--r--include/linux/efi.h6
-rw-r--r--include/linux/fault-inject.h18
-rw-r--r--include/linux/fs.h68
-rw-r--r--include/linux/genalloc.h34
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/idr.h4
-rw-r--r--include/linux/input.h2
-rw-r--r--include/linux/ioport.h1
-rw-r--r--include/linux/jbd2.h6
-rw-r--r--include/linux/llist.h126
-rw-r--r--include/linux/memcontrol.h8
-rw-r--r--include/linux/mfd/aat2870.h181
-rw-r--r--include/linux/mfd/ab8500.h8
-rw-r--r--include/linux/mfd/max8997.h7
-rw-r--r--include/linux/mfd/max8998.h12
-rw-r--r--include/linux/mfd/stmpe.h3
-rw-r--r--include/linux/mfd/tps65910.h1
-rw-r--r--include/linux/mfd/tps65912.h327
-rw-r--r--include/linux/mfd/wm831x/core.h119
-rw-r--r--include/linux/mfd/wm831x/pdata.h3
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/nfs.h2
-rw-r--r--include/linux/nfs4.h1
-rw-r--r--include/linux/nfs_fs.h7
-rw-r--r--include/linux/nfs_fs_sb.h4
-rw-r--r--include/linux/nfs_xdr.h27
-rw-r--r--include/linux/of.h15
-rw-r--r--include/linux/of_gpio.h9
-rw-r--r--include/linux/pci_ids.h10
-rw-r--r--include/linux/posix_acl.h82
-rw-r--r--include/linux/power/bq20z75.h3
-rw-r--r--include/linux/power/max17042_battery.h91
-rw-r--r--include/linux/pstore.h9
-rw-r--r--include/linux/radix-tree.h37
-rw-r--r--include/linux/random.h12
-rw-r--r--include/linux/regulator/consumer.h3
-rw-r--r--include/linux/regulator/driver.h4
-rw-r--r--include/linux/serial_sci.h75
-rw-r--r--include/linux/sh_clk.h4
-rw-r--r--include/linux/sh_dma.h8
-rw-r--r--include/linux/shmem_fs.h17
-rw-r--r--include/linux/swapops.h23
-rw-r--r--include/linux/thermal.h22
-rw-r--r--include/net/cipso_ipv4.h2
-rw-r--r--include/net/dst.h17
-rw-r--r--include/net/netlabel.h2
-rw-r--r--include/net/secure_seq.h20
-rw-r--r--include/pcmcia/device_id.h9
-rw-r--r--include/scsi/osd_ore.h125
-rw-r--r--include/trace/events/ext4.h87
-rw-r--r--include/trace/events/jbd2.h36
-rw-r--r--include/video/omapdss.h26
-rw-r--r--init/main.c7
-rw-r--r--ipc/shm.c16
-rw-r--r--kernel/debug/gdbstub.c22
-rw-r--r--kernel/debug/kdb/kdb_bt.c5
-rw-r--r--kernel/debug/kdb/kdb_cmds4
-rw-r--r--kernel/debug/kdb/kdb_debugger.c21
-rw-r--r--kernel/debug/kdb/kdb_io.c36
-rw-r--r--kernel/debug/kdb/kdb_main.c4
-rw-r--r--kernel/debug/kdb/kdb_private.h3
-rw-r--r--kernel/futex.c54
-rw-r--r--kernel/kmod.c2
-rw-r--r--kernel/lockdep.c37
-rw-r--r--kernel/resource.c21
-rw-r--r--kernel/taskstats.c18
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Makefile4
-rw-r--r--lib/bitmap.c2
-rw-r--r--lib/fault-inject.c20
-rw-r--r--lib/genalloc.c300
-rw-r--r--lib/idr.c67
-rw-r--r--lib/llist.c129
-rw-r--r--lib/md5.c95
-rw-r--r--lib/radix-tree.c121
-rw-r--r--lib/sha1.c212
-rw-r--r--mm/failslab.c14
-rw-r--r--mm/filemap.c106
-rw-r--r--mm/memcontrol.c66
-rw-r--r--mm/memory-failure.c92
-rw-r--r--mm/mincore.c11
-rw-r--r--mm/oom_kill.c4
-rw-r--r--mm/page_alloc.c13
-rw-r--r--mm/shmem.c1493
-rw-r--r--mm/slab.c99
-rw-r--r--mm/slub.c22
-rw-r--r--mm/swapfile.c20
-rw-r--r--mm/truncate.c8
-rw-r--r--net/atm/br2684.c2
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/secure_seq.c184
-rw-r--r--net/core/skbuff.c17
-rw-r--r--net/dccp/ipv4.c1
-rw-r--r--net/dccp/ipv6.c9
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/inet_hashtables.c1
-rw-r--r--net/ipv4/inetpeer.c1
-rw-r--r--net/ipv4/ip_output.c10
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_common.c1
-rw-r--r--net/ipv4/route.c15
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/datagram.c11
-rw-r--r--net/ipv6/inet6_hashtables.c1
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_output.c13
-rw-r--r--net/ipv6/route.c35
-rw-r--r--net/ipv6/tcp_ipv6.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c1
-rw-r--r--net/netlabel/Makefile2
-rw-r--r--net/netlabel/netlabel_addrlist.c2
-rw-r--r--net/netlabel/netlabel_addrlist.h2
-rw-r--r--net/netlabel/netlabel_cipso_v4.c2
-rw-r--r--net/netlabel/netlabel_cipso_v4.h2
-rw-r--r--net/netlabel/netlabel_domainhash.c2
-rw-r--r--net/netlabel/netlabel_domainhash.h2
-rw-r--r--net/netlabel/netlabel_kapi.c2
-rw-r--r--net/netlabel/netlabel_mgmt.c2
-rw-r--r--net/netlabel/netlabel_mgmt.h2
-rw-r--r--net/netlabel/netlabel_unlabeled.c2
-rw-r--r--net/netlabel/netlabel_unlabeled.h2
-rw-r--r--net/netlabel/netlabel_user.c2
-rw-r--r--net/netlabel/netlabel_user.h2
-rw-r--r--net/sched/sch_sfq.c7
-rw-r--r--net/socket.c73
-rw-r--r--net/sunrpc/xprt.c1
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--net/xfrm/xfrm_algo.c4
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/selinux/include/netif.h2
-rw-r--r--security/selinux/include/netlabel.h2
-rw-r--r--security/selinux/include/netnode.h2
-rw-r--r--security/selinux/include/netport.h2
-rw-r--r--security/selinux/netif.c2
-rw-r--r--security/selinux/netlabel.c2
-rw-r--r--security/selinux/netnode.c2
-rw-r--r--security/selinux/netport.c2
-rw-r--r--security/selinux/selinuxfs.c2
-rw-r--r--security/selinux/ss/ebitmap.c2
-rw-r--r--security/selinux/ss/mls.c2
-rw-r--r--security/selinux/ss/mls.h2
-rw-r--r--security/selinux/ss/policydb.c2
-rw-r--r--security/selinux/ss/services.c2
-rw-r--r--security/smack/smack_lsm.c2
-rw-r--r--sound/core/pcm_compat.c2
-rw-r--r--sound/core/rtctimer.c2
-rw-r--r--sound/pci/asihpi/hpidspcd.c9
-rw-r--r--sound/pci/asihpi/hpioctl.c19
-rw-r--r--sound/pci/rme9652/hdspm.c109
-rw-r--r--sound/soc/txx9/txx9aclc.c1
-rw-r--r--tools/power/cpupower/.gitignore22
-rw-r--r--tools/power/cpupower/Makefile279
-rw-r--r--tools/power/cpupower/README49
-rw-r--r--tools/power/cpupower/ToDo11
-rw-r--r--tools/power/cpupower/bench/Makefile29
-rw-r--r--tools/power/cpupower/bench/README-BENCH124
-rw-r--r--tools/power/cpupower/bench/benchmark.c194
-rw-r--r--tools/power/cpupower/bench/benchmark.h29
-rw-r--r--tools/power/cpupower/bench/config.h36
-rw-r--r--tools/power/cpupower/bench/cpufreq-bench_plot.sh104
-rw-r--r--tools/power/cpupower/bench/cpufreq-bench_script.sh101
-rw-r--r--tools/power/cpupower/bench/example.cfg11
-rw-r--r--tools/power/cpupower/bench/main.c202
-rw-r--r--tools/power/cpupower/bench/parse.c225
-rw-r--r--tools/power/cpupower/bench/parse.h53
-rw-r--r--tools/power/cpupower/bench/system.c191
-rw-r--r--tools/power/cpupower/bench/system.h29
-rw-r--r--tools/power/cpupower/debug/i386/Makefile20
-rw-r--r--tools/power/cpupower/debug/i386/centrino-decode.c113
-rw-r--r--tools/power/cpupower/debug/i386/dump_psb.c196
-rw-r--r--tools/power/cpupower/debug/i386/intel_gsic.c78
-rw-r--r--tools/power/cpupower/debug/i386/powernow-k8-decode.c96
-rw-r--r--tools/power/cpupower/debug/kernel/Makefile23
-rw-r--r--tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c113
-rw-r--r--tools/power/cpupower/debug/x86_64/Makefile14
l---------tools/power/cpupower/debug/x86_64/centrino-decode.c1
l---------tools/power/cpupower/debug/x86_64/powernow-k8-decode.c1
-rw-r--r--tools/power/cpupower/lib/cpufreq.c208
-rw-r--r--tools/power/cpupower/lib/cpufreq.h223
-rw-r--r--tools/power/cpupower/lib/sysfs.c672
-rw-r--r--tools/power/cpupower/lib/sysfs.h31
-rw-r--r--tools/power/cpupower/man/cpupower-frequency-info.176
-rw-r--r--tools/power/cpupower/man/cpupower-frequency-set.154
-rw-r--r--tools/power/cpupower/man/cpupower-info.119
-rw-r--r--tools/power/cpupower/man/cpupower-monitor.1179
-rw-r--r--tools/power/cpupower/man/cpupower-set.1103
-rw-r--r--tools/power/cpupower/man/cpupower.172
-rw-r--r--tools/power/cpupower/po/cs.po944
-rw-r--r--tools/power/cpupower/po/de.po961
-rw-r--r--tools/power/cpupower/po/fr.po947
-rw-r--r--tools/power/cpupower/po/it.po961
-rw-r--r--tools/power/cpupower/po/pt.po957
-rw-r--r--tools/power/cpupower/utils/builtin.h18
-rw-r--r--tools/power/cpupower/utils/cpufreq-info.c708
-rw-r--r--tools/power/cpupower/utils/cpufreq-set.c358
-rw-r--r--tools/power/cpupower/utils/cpuidle-info.c244
-rw-r--r--tools/power/cpupower/utils/cpupower-info.c153
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c153
-rw-r--r--tools/power/cpupower/utils/cpupower.c203
-rw-r--r--tools/power/cpupower/utils/helpers/amd.c137
-rw-r--r--tools/power/cpupower/utils/helpers/bitmask.c292
-rw-r--r--tools/power/cpupower/utils/helpers/bitmask.h33
-rw-r--r--tools/power/cpupower/utils/helpers/cpuid.c176
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h178
-rw-r--r--tools/power/cpupower/utils/helpers/misc.c27
-rw-r--r--tools/power/cpupower/utils/helpers/msr.c115
-rw-r--r--tools/power/cpupower/utils/helpers/pci.c44
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.c358
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.h28
-rw-r--r--tools/power/cpupower/utils/helpers/topology.c108
-rw-r--r--tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c338
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c196
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c448
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h68
-rw-r--r--tools/power/cpupower/utils/idle_monitor/idle_monitors.def7
-rw-r--r--tools/power/cpupower/utils/idle_monitor/idle_monitors.h18
-rw-r--r--tools/power/cpupower/utils/idle_monitor/mperf_monitor.c255
-rw-r--r--tools/power/cpupower/utils/idle_monitor/nhm_idle.c216
-rw-r--r--tools/power/cpupower/utils/idle_monitor/snb_idle.c190
-rwxr-xr-xtools/power/cpupower/utils/version-gen.sh35
-rw-r--r--tools/power/x86/turbostat/turbostat.c46
-rw-r--r--tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c5
880 files changed, 40610 insertions, 13251 deletions
diff --git a/CREDITS b/CREDITS
index 1deb331d96e..07e32a87d95 100644
--- a/CREDITS
+++ b/CREDITS
@@ -504,7 +504,7 @@ N: Dominik Brodowski
E: linux@brodo.de
W: http://www.brodo.de/
P: 1024D/725B37C6 190F 3E77 9C89 3B6D BECD 46EE 67C3 0308 725B 37C6
-D: parts of CPUFreq code, ACPI bugfixes
+D: parts of CPUFreq code, ACPI bugfixes, PCMCIA rewrite, cpufrequtils
S: Tuebingen, Germany
N: Andries Brouwer
@@ -857,6 +857,10 @@ S: One Dell Way
S: Round Rock, TX 78682
S: USA
+N: Mattia Dongili
+E: malattia@gmail.com
+D: cpufrequtils (precursor to cpupowerutils)
+
N: Ben Dooks
E: ben-linux@fluff.org
E: ben@simtec.co.uk
@@ -1883,6 +1887,11 @@ S: Kruislaan 419
S: 1098 VA Amsterdam
S: The Netherlands
+N: Goran Koruga
+E: korugag@siol.net
+D: cpufrequtils (precursor to cpupowerutils)
+S: Slovenia
+
N: Jiri Kosina
E: jikos@jikos.cz
E: jkosina@suse.cz
@@ -2916,6 +2925,12 @@ S: Schlossbergring 9
S: 79098 Freiburg
S: Germany
+N: Thomas Renninger
+E: trenn@suse.de
+D: cpupowerutils
+S: SUSE Linux GmbH
+S: Germany
+
N: Joerg Reuter
E: jreuter@yaina.de
W: http://yaina.de/jreuter/
diff --git a/Documentation/ABI/testing/pstore b/Documentation/ABI/testing/pstore
index ddf451ee2a0..ff1df4e3b05 100644
--- a/Documentation/ABI/testing/pstore
+++ b/Documentation/ABI/testing/pstore
@@ -39,3 +39,9 @@ Description: Generic interface to platform dependent persistent storage.
multiple) files based on the record size of the underlying
persistent storage until at least this amount is reached.
Default is 10 Kbytes.
+
+ Pstore only supports one backend at a time. If multiple
+ backends are available, the preferred backend may be
+ set by passing the pstore.backend= argument to the kernel at
+ boot time.
+
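As a concrete illustration, on a machine whose firmware provides the
ACPI ERST backend, that backend could be selected explicitly with a boot
parameter along these lines (the name must match whatever the chosen
backend registers with pstore; "erst" is the expected name for that
driver):

	pstore.backend=erst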
diff --git a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
index 807fca2ae2a..ff53183c384 100644
--- a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
@@ -4,3 +4,20 @@ KernelVersion: 2.6.37
Contact: "Ike Panhc <ike.pan@canonical.com>"
Description:
Control the power of camera module. 1 means on, 0 means off.
+
+What: /sys/devices/platform/ideapad/cfg
+Date: Jun 2011
+KernelVersion: 3.1
+Contact: "Ike Panhc <ike.pan@canonical.com>"
+Description:
+ Ideapad capability bits.
+ Bits 8-10: 1 - Intel graphics only
+            2 - ATI graphics only
+            3 - Nvidia graphics only
+            4 - Intel and ATI graphics
+            5 - Intel and Nvidia graphics
+ Bit 16: Bluetooth present (1 = present)
+ Bit 17: 3G present (1 = present)
+ Bit 18: Wifi present (1 = present)
+ Bit 19: Camera present (1 = present)
+
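A small user-space sketch of how these capability bits might be decoded
(the sysfs path is the one documented above; the program itself, and its
assumption that the value parses with strtoul, are illustrative and not
part of the driver):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char buf[32];
		unsigned long cfg;
		FILE *f = fopen("/sys/devices/platform/ideapad/cfg", "r");

		if (!f || !fgets(buf, sizeof(buf), f))
			return 1;
		fclose(f);

		cfg = strtoul(buf, NULL, 0);	/* accepts "0x..." or decimal */

		printf("graphics code: %lu\n", (cfg >> 8) & 0x7);	/* bits 8-10 */
		printf("bluetooth: %s\n", cfg & (1UL << 16) ? "yes" : "no");
		printf("3G:        %s\n", cfg & (1UL << 17) ? "yes" : "no");
		printf("wifi:      %s\n", cfg & (1UL << 18) ? "yes" : "no");
		printf("camera:    %s\n", cfg & (1UL << 19) ? "yes" : "no");
		return 0;
	}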
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index fa6e25b94a5..c940239d967 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -80,22 +80,13 @@ available tools.
The limit on the length of lines is 80 columns and this is a strongly
preferred limit.
-Statements longer than 80 columns will be broken into sensible chunks.
-Descendants are always substantially shorter than the parent and are placed
-substantially to the right. The same applies to function headers with a long
-argument list. Long strings are as well broken into shorter strings. The
-only exception to this is where exceeding 80 columns significantly increases
-readability and does not hide information.
-
-void fun(int a, int b, int c)
-{
- if (condition)
- printk(KERN_WARNING "Warning this is a long printk with "
- "3 parameters a: %u b: %u "
- "c: %u \n", a, b, c);
- else
- next_statement;
-}
+Statements longer than 80 columns will be broken into sensible chunks, unless
+exceeding 80 columns significantly increases readability and does not hide
+information. Descendants are always substantially shorter than the parent and
+are placed substantially to the right. The same applies to function headers
+with a long argument list. However, never break user-visible strings such as
+printk messages, because that breaks the ability to grep for them.
+
Chapter 3: Placing Braces and Spaces
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
index dfab71848dc..5cc699ba545 100644
--- a/Documentation/acpi/apei/einj.txt
+++ b/Documentation/acpi/apei/einj.txt
@@ -48,12 +48,19 @@ directory apei/einj. The following files are provided.
- param1
This file is used to set the first error parameter value. Effect of
parameter depends on error_type specified. For memory error, this is
- physical memory address.
+ physical memory address. Only available if param_extension module
+ parameter is specified.
- param2
This file is used to set the second error parameter value. Effect of
parameter depends on error_type specified. For memory error, this is
- physical memory address mask.
+ physical memory address mask. Only available if param_extension
+ module parameter is specified.
+
+Support for injection parameters is a BIOS-version-specific extension,
+that is, it only works with some BIOS versions. If you want to use it,
+please make sure your BIOS has the proper support and specify
+"param_extension=y" as a module parameter.
For more information about EINJ, please refer to ACPI specification
version 4.0, section 17.5.
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index 6b5c42dbbe8..2c656ae43ba 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -4,7 +4,8 @@ dm-crypt
Device-Mapper's "crypt" target provides transparent encryption of block devices
using the kernel crypto API.
-Parameters: <cipher> <key> <iv_offset> <device path> <offset>
+Parameters: <cipher> <key> <iv_offset> <device path> \
+ <offset> [<#opt_params> <opt_params>]
<cipher>
Encryption cipher and an optional IV generation mode.
@@ -37,6 +38,24 @@ Parameters: <cipher> <key> <iv_offset> <device path> <offset>
<offset>
Starting sector within the device where the encrypted data begins.
+<#opt_params>
+ Number of optional parameters. If there are no optional parameters,
+ the optional parameters section can be skipped or #opt_params can be zero.
+ Otherwise #opt_params is the number of following arguments.
+
+ Example of optional parameters section:
+ 1 allow_discards
+
+allow_discards
+ Block discard requests (a.k.a. TRIM) are passed through the crypt device.
+ The default is to ignore discard requests.
+
+ WARNING: Assess the specific security risks carefully before enabling this
+ option. For example, allowing discards on encrypted devices may lead to
+ the leak of information about the ciphertext device (filesystem type,
+ used space etc.) if the discarded blocks can be located easily on the
+ device later.
+
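Putting the pieces together, a table line enabling the optional discard
passthrough might look like the following (the start/length, key and
device below are placeholders, not values taken from this document):

	0 2097152 crypt aes-cbc-essiv:sha256 <64-hex-digit key> 0 /dev/sdb2 0 1 allow_discards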
Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
diff --git a/Documentation/device-mapper/dm-flakey.txt b/Documentation/device-mapper/dm-flakey.txt
index c8efdfd19a6..6ff5c232722 100644
--- a/Documentation/device-mapper/dm-flakey.txt
+++ b/Documentation/device-mapper/dm-flakey.txt
@@ -1,17 +1,53 @@
dm-flakey
=========
-This target is the same as the linear target except that it returns I/O
-errors periodically. It's been found useful in simulating failing
-devices for testing purposes.
+This target is the same as the linear target except that it exhibits
+unreliable behaviour periodically. It's been found useful in simulating
+failing devices for testing purposes.
Starting from the time the table is loaded, the device is available for
-<up interval> seconds, then returns errors for <down interval> seconds,
-and then this cycle repeats.
+<up interval> seconds, then exhibits unreliable behaviour for <down
+interval> seconds, and then this cycle repeats.
-Parameters: <dev path> <offset> <up interval> <down interval>
+Also consider using this in combination with the dm-delay target,
+which can delay reads and writes and/or send them to different
+underlying devices.
+
+Table parameters
+----------------
+ <dev path> <offset> <up interval> <down interval> \
+ [<num_features> [<feature arguments>]]
+
+Mandatory parameters:
<dev path>: Full pathname to the underlying block-device, or a
"major:minor" device-number.
<offset>: Starting sector within the device.
<up interval>: Number of seconds device is available.
<down interval>: Number of seconds device returns errors.
+
+Optional feature parameters:
+ If no feature parameters are present, during the periods of
+ unreliability, all I/O returns errors.
+
+ drop_writes:
+ All write I/O is silently ignored.
+ Read I/O is handled correctly.
+
+ corrupt_bio_byte <Nth_byte> <direction> <value> <flags>:
+ During <down interval>, replace <Nth_byte> of the data of
+ each matching bio with <value>.
+
+ <Nth_byte>: The offset of the byte to replace.
+ Counting starts at 1, to replace the first byte.
+ <direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
+ 'w' is incompatible with drop_writes.
+ <value>: The value (from 0-255) to write.
+ <flags>: Perform the replacement only if bio->bi_rw has all the
+ selected flags set.
+
+Examples:
+ corrupt_bio_byte 32 r 1 0
+ - replaces the 32nd byte of READ bios with the value 1
+
+ corrupt_bio_byte 224 w 0 32
+ - replaces the 224th byte of REQ_META (=32) bios with the value 0
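For reference, a complete flakey table using one of these features might
look like the following (device and intervals are placeholders): the
device behaves normally for 180 seconds, then silently drops writes for
10 seconds, and the cycle repeats.

	0 409600 flakey 8:17 0 180 10 1 drop_writes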
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 33b6b7071ac..2a8c11331d2 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -1,70 +1,108 @@
-Device-mapper RAID (dm-raid) is a bridge from DM to MD. It
-provides a way to use device-mapper interfaces to access the MD RAID
-drivers.
+dm-raid
+-------
-As with all device-mapper targets, the nominal public interfaces are the
-constructor (CTR) tables and the status outputs (both STATUSTYPE_INFO
-and STATUSTYPE_TABLE). The CTR table looks like the following:
+The device-mapper RAID (dm-raid) target provides a bridge from DM to MD.
+It allows the MD RAID drivers to be accessed using a device-mapper
+interface.
-1: <s> <l> raid \
-2: <raid_type> <#raid_params> <raid_params> \
-3: <#raid_devs> <meta_dev1> <dev1> .. <meta_devN> <devN>
-
-Line 1 contains the standard first three arguments to any device-mapper
-target - the start, length, and target type fields. The target type in
-this case is "raid".
-
-Line 2 contains the arguments that define the particular raid
-type/personality/level, the required arguments for that raid type, and
-any optional arguments. Possible raid types include: raid4, raid5_la,
-raid5_ls, raid5_rs, raid6_zr, raid6_nr, and raid6_nc. (raid1 is
-planned for the future.) The list of required and optional parameters
-is the same for all the current raid types. The required parameters are
-positional, while the optional parameters are given as key/value pairs.
-The possible parameters are as follows:
- <chunk_size> Chunk size in sectors.
- [[no]sync] Force/Prevent RAID initialization
- [rebuild <idx>] Rebuild the drive indicated by the index
- [daemon_sleep <ms>] Time between bitmap daemon work to clear bits
- [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
- [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
- [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
- [stripe_cache <sectors>] Stripe cache size for higher RAIDs
-
-Line 3 contains the list of devices that compose the array in
-metadata/data device pairs. If the metadata is stored separately, a '-'
-is given for the metadata device position. If a drive has failed or is
-missing at creation time, a '-' can be given for both the metadata and
-data drives for a given position.
-
-NB. Currently all metadata devices must be specified as '-'.
-
-Examples:
-# RAID4 - 4 data drives, 1 parity
+The target is named "raid" and it accepts the following parameters:
+
+ <raid_type> <#raid_params> <raid_params> \
+ <#raid_devs> <metadata_dev0> <dev0> [.. <metadata_devN> <devN>]
+
+<raid_type>:
+ raid1 RAID1 mirroring
+ raid4 RAID4 dedicated parity disk
+ raid5_la RAID5 left asymmetric
+ - rotating parity 0 with data continuation
+ raid5_ra RAID5 right asymmetric
+ - rotating parity N with data continuation
+ raid5_ls RAID5 left symmetric
+ - rotating parity 0 with data restart
+ raid5_rs RAID5 right symmetric
+ - rotating parity N with data restart
+ raid6_zr RAID6 zero restart
+ - rotating parity zero (left-to-right) with data restart
+ raid6_nr RAID6 N restart
+ - rotating parity N (right-to-left) with data restart
+ raid6_nc RAID6 N continue
+ - rotating parity N (right-to-left) with data continuation
+
+ Reference: Chapter 4 of
+ http://www.snia.org/sites/default/files/SNIA_DDF_Technical_Position_v2.0.pdf
+
+<#raid_params>: The number of parameters that follow.
+
+<raid_params> consists of
+ Mandatory parameters:
+ <chunk_size>: Chunk size in sectors. This parameter is often known as
+ "stripe size". It is the only mandatory parameter and
+ is placed first.
+
+ followed by optional parameters (in any order):
+ [sync|nosync] Force or prevent RAID initialization.
+
+ [rebuild <idx>] Rebuild drive number idx (first drive is 0).
+
+ [daemon_sleep <ms>]
+ Interval between runs of the bitmap daemon that
+ clear bits. A longer interval means less bitmap I/O but
+ resyncing after a failure is likely to take longer.
+
+ [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
+ [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
+ [write_mostly <idx>] Drive index is write-mostly
+ [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
+ [stripe_cache <sectors>] Stripe cache size (higher RAIDs only)
+ [region_size <sectors>]
+ The region_size multiplied by the number of regions is the
+ logical size of the array. The bitmap records the device
+ synchronisation state for each region.
+
+<#raid_devs>: The number of devices composing the array.
+ Each device consists of two entries. The first is the device
+ containing the metadata (if any); the second is the one containing the
+ data.
+
+ If a drive has failed or is missing at creation time, a '-' can be
+ given for both the metadata and data drives for a given position.
+
+
+Example tables
+--------------
+# RAID4 - 4 data drives, 1 parity (no metadata devices)
# No metadata devices specified to hold superblock/bitmap info
# Chunk size of 1MiB
# (Lines separated for easy reading)
+
0 1960893648 raid \
raid4 1 2048 \
5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
-# RAID4 - 4 data drives, 1 parity (no metadata devices)
+# RAID4 - 4 data drives, 1 parity (with metadata devices)
# Chunk size of 1MiB, force RAID initialization,
# min recovery rate at 20 kiB/sec/disk
+
0 1960893648 raid \
- raid4 4 2048 min_recovery_rate 20 sync\
- 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
+ raid4 4 2048 sync min_recovery_rate 20 \
+ 5 8:17 8:18 8:33 8:34 8:49 8:50 8:65 8:66 8:81 8:82
-Performing a 'dmsetup table' should display the CTR table used to
-construct the mapping (with possible reordering of optional
-parameters).
+'dmsetup table' displays the table used to construct the mapping.
+The optional parameters are always printed in the order listed
+above with "sync" or "nosync" always output ahead of the other
+arguments, regardless of the order used when originally loading the table.
+Arguments that can be repeated are ordered by value.
-Performing a 'dmsetup status' will yield information on the state and
-health of the array. The output is as follows:
+'dmsetup status' yields information on the state and health of the
+array.
+The output is as follows:
1: <s> <l> raid \
2: <raid_type> <#devices> <1 health char for each dev> <resync_ratio>
-Line 1 is standard DM output. Line 2 is best shown by example:
+Line 1 is the standard output produced by device-mapper.
+Line 2 is produced by the raid target, and best explained by example:
0 1960893648 raid raid4 5 AAAAA 2/490221568
Here we can see the RAID type is raid4, there are 5 devices - all of
which are 'A'live, and the array is 2/490221568 complete with recovery.
+Faulty or missing devices are marked 'D'. Devices that are out-of-sync
+are marked 'a'.
diff --git a/Documentation/devicetree/bindings/gpio/gpio_keys.txt b/Documentation/devicetree/bindings/gpio/gpio_keys.txt
index 7190c99d761..5c2c02140a6 100644
--- a/Documentation/devicetree/bindings/gpio/gpio_keys.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio_keys.txt
@@ -10,7 +10,7 @@ Optional properties:
Each button (key) is represented as a sub-node of "gpio-keys":
Subnode properties:
- - gpios: OF devcie-tree gpio specificatin.
+ - gpios: OF device-tree gpio specification.
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
diff --git a/Documentation/devicetree/bindings/input/fsl-mma8450.txt b/Documentation/devicetree/bindings/input/fsl-mma8450.txt
new file mode 100644
index 00000000000..a00c94ccbde
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/fsl-mma8450.txt
@@ -0,0 +1,11 @@
+* Freescale MMA8450 3-Axis Accelerometer
+
+Required properties:
+- compatible : "fsl,mma8450".
+
+Example:
+
+accelerometer: mma8450@1c {
+ compatible = "fsl,mma8450";
+ reg = <0x1c>;
+};
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
index 5a0cb1ef616..94b7e0f96b3 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine.txt
@@ -10,87 +10,181 @@ NOTE: For DMA Engine usage in async_tx please see:
Below is a guide to device driver writers on how to use the Slave-DMA API of the
DMA Engine. This is applicable only for slave DMA usage only.
-The slave DMA usage consists of following steps
+The slave DMA usage consists of following steps:
1. Allocate a DMA slave channel
2. Set slave and controller specific parameters
3. Get a descriptor for transaction
-4. Submit the transaction and wait for callback notification
+4. Submit the transaction
+5. Issue pending requests and wait for callback notification
1. Allocate a DMA slave channel
-Channel allocation is slightly different in the slave DMA context, client
-drivers typically need a channel from a particular DMA controller only and even
-in some cases a specific channel is desired. To request a channel
-dma_request_channel() API is used.
-
-Interface:
-struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
- dma_filter_fn filter_fn,
- void *filter_param);
-where dma_filter_fn is defined as:
-typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
-
-When the optional 'filter_fn' parameter is set to NULL dma_request_channel
-simply returns the first channel that satisfies the capability mask. Otherwise,
-when the mask parameter is insufficient for specifying the necessary channel,
-the filter_fn routine can be used to disposition the available channels in the
-system. The filter_fn routine is called once for each free channel in the
-system. Upon seeing a suitable channel filter_fn returns DMA_ACK which flags
-that channel to be the return value from dma_request_channel. A channel
-allocated via this interface is exclusive to the caller, until
-dma_release_channel() is called.
+
+ Channel allocation is slightly different in the slave DMA context:
+ client drivers typically need a channel from a particular DMA
+ controller only, and in some cases a specific channel is desired.
+ To request a channel the dma_request_channel() API is used.
+
+ Interface:
+ struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
+ dma_filter_fn filter_fn,
+ void *filter_param);
+ where dma_filter_fn is defined as:
+ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+
+ The 'filter_fn' parameter is optional, but highly recommended for
+ slave and cyclic channels as they typically need to obtain a specific
+ DMA channel.
+
+ When the optional 'filter_fn' parameter is NULL, dma_request_channel()
+ simply returns the first channel that satisfies the capability mask.
+
+ Otherwise, the 'filter_fn' routine will be called once for each free
+ channel which has a capability in 'mask'. 'filter_fn' is expected to
+ return 'true' when the desired DMA channel is found.
+
+ A channel allocated via this interface is exclusive to the caller,
+ until dma_release_channel() is called.
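	A minimal sketch of what this looks like from a client driver; the
	filter callback and the match token passed in 'filter_param' are
	made up for the example, real filters match on controller-specific
	data:

	static bool my_filter(struct dma_chan *chan, void *filter_param)
	{
		/* Illustrative test only: real filters usually check
		 * controller-specific data (request line, mux setting, ...). */
		return chan->chan_id == (int)(unsigned long)filter_param;
	}

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, my_filter, (void *)2 /* wanted id */);
	if (!chan)
		/* no usable channel; bail out or fall back to PIO */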
2. Set slave and controller specific parameters
-Next step is always to pass some specific information to the DMA driver. Most of
-the generic information which a slave DMA can use is in struct dma_slave_config.
-It allows the clients to specify DMA direction, DMA addresses, bus widths, DMA
-burst lengths etc. If some DMA controllers have more parameters to be sent then
-they should try to embed struct dma_slave_config in their controller specific
-structure. That gives flexibility to client to pass more parameters, if
-required.
-
-Interface:
-int dmaengine_slave_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+
+ Next step is always to pass some specific information to the DMA
+ driver. Most of the generic information which a slave DMA can use
+ is in struct dma_slave_config. This allows the clients to specify
+ DMA direction, DMA addresses, bus widths, DMA burst lengths etc
+ for the peripheral.
+
+ If some DMA controllers have more parameters to be sent then they
+ should try to embed struct dma_slave_config in their controller
+ specific structure. That gives flexibility to client to pass more
+ parameters, if required.
+
+ Interface:
+ int dmaengine_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+
+ Please see the dma_slave_config structure definition in dmaengine.h
+ for a detailed explanation of the struct members. Please note
+ that the 'direction' member will be going away as it duplicates the
+ direction given in the prepare call.
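+
+ As a minimal sketch (the FIFO address and burst values are made-up
+ examples for an imaginary peripheral, not recommendations), a
+ memory-to-peripheral configuration might look like:
+
+	struct dma_slave_config cfg;
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.direction = DMA_TO_DEVICE;
+	cfg.dst_addr = my_peripheral_fifo_phys;	/* placeholder address */
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_maxburst = 4;
+
+	ret = dmaengine_slave_config(chan, &cfg);
+	if (ret)
+		/* the channel/controller rejected the configuration */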
3. Get a descriptor for transaction
-For slave usage the various modes of slave transfers supported by the
-DMA-engine are:
-slave_sg - DMA a list of scatter gather buffers from/to a peripheral
-dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the
+
+ For slave usage the various modes of slave transfers supported by the
+ DMA-engine are:
+
+ slave_sg - DMA a list of scatter gather buffers from/to a peripheral
+ dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the
operation is explicitly stopped.
-The non NULL return of this transfer API represents a "descriptor" for the given
-transaction.
-
-Interface:
-struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_sg)(
- struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
+
+ A non-NULL return of this transfer API represents a "descriptor" for
+ the given transaction.
+
+ Interface:
+ struct dma_async_tx_descriptor *(*chan->device->device_prep_slave_sg)(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags);
-struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)(
+
+ struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_data_direction direction);
-4. Submit the transaction and wait for callback notification
-To schedule the transaction to be scheduled by dma device, the "descriptor"
-returned in above (3) needs to be submitted.
-To tell the dma driver that a transaction is ready to be serviced, the
-descriptor->submit() callback needs to be invoked. This chains the descriptor to
-the pending queue.
-The transactions in the pending queue can be activated by calling the
-issue_pending API. If channel is idle then the first transaction in queue is
-started and subsequent ones queued up.
-On completion of the DMA operation the next in queue is submitted and a tasklet
-triggered. The tasklet would then call the client driver completion callback
-routine for notification, if set.
-Interface:
-void dma_async_issue_pending(struct dma_chan *chan);
-
-==============================================================================
-
-Additional usage notes for dma driver writers
-1/ Although DMA engine specifies that completion callback routines cannot submit
-any new operations, but typically for slave DMA subsequent transaction may not
-be available for submit prior to callback routine being called. This requirement
-is not a requirement for DMA-slave devices. But they should take care to drop
-the spin-lock they might be holding before calling the callback routine
+ The peripheral driver is expected to have mapped the scatterlist for
+ the DMA operation prior to calling device_prep_slave_sg, and must
+ keep the scatterlist mapped until the DMA operation has completed.
+ The scatterlist must be mapped using the DMA engine's struct device
+ (chan->device->dev). So, normal setup should look like this:
+
+	nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len, direction);
+	if (nr_sg == 0)
+		/* error */
+
+ desc = chan->device->device_prep_slave_sg(chan, sgl, nr_sg,
+ direction, flags);
+
+ Once a descriptor has been obtained, the callback information can be
+ added and the descriptor must then be submitted. Some DMA engine
+ drivers may hold a spinlock between a successful preparation and
+ submission, so it is important that these two operations are closely
+ paired.
+
+ Note:
+ Although the async_tx API specifies that completion callback
+ routines cannot submit any new operations, this is not the
+ case for slave/cyclic DMA.
+
+ For slave DMA, the subsequent transaction may not be available
+ for submission prior to the callback function being invoked, so
+ slave DMA callbacks are permitted to prepare and submit a new
+ transaction.
+
+ For cyclic DMA, a callback function may wish to terminate the
+ DMA via dmaengine_terminate_all().
+
+ Therefore, it is important that DMA engine drivers drop any
+ locks before calling the callback function, as holding a lock
+ there may otherwise cause a deadlock.
+
+ Note that callbacks will always be invoked from the DMA
+ engine's tasklet, never from interrupt context.
+
+4. Submit the transaction
+
+ Once the descriptor has been prepared and the callback information
+ added, it must be placed on the DMA engine driver's pending queue.
+
+ Interface:
+ dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
+
+ This returns a cookie that can be used to check the progress of DMA
+ engine activity via other DMA engine calls not covered in this
+ document.
+
+ dmaengine_submit() will not start the DMA operation; it merely adds
+ it to the pending queue. For this, see step 5, dma_async_issue_pending.
+
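+ As a sketch (my_dma_complete and my_data are placeholders supplied by
+ the client driver), adding the callback and submitting might look
+ like:
+
+	dma_cookie_t cookie;
+
+	desc->callback = my_dma_complete;
+	desc->callback_param = my_data;
+
+	cookie = dmaengine_submit(desc);
+	if (dma_submit_error(cookie))
+		/* the descriptor could not be queued */
+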
+5. Issue pending DMA requests and wait for callback notification
+
+ The transactions in the pending queue can be activated by calling the
+ issue_pending API. If the channel is idle, then the first transaction
+ in the queue is started and subsequent ones are queued up.
+
+ On completion of each DMA operation, the next one in the queue is
+ started and a tasklet is triggered. The tasklet will then call the
+ client driver's completion callback routine for notification, if set.
+
+ Interface:
+ void dma_async_issue_pending(struct dma_chan *chan);
+
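+ A common pattern, sketched here using a struct completion as the
+ callback parameter (this is a client-side convention, not something
+ the DMA engine API requires), is:
+
+	static void my_dma_complete(void *param)
+	{
+		/* runs from the DMA engine's tasklet; must not sleep */
+		complete(param);
+	}
+
+	...
+	struct completion done;
+
+	init_completion(&done);
+	desc->callback = my_dma_complete;
+	desc->callback_param = &done;
+	dmaengine_submit(desc);
+
+	dma_async_issue_pending(chan);
+	wait_for_completion(&done);
+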
+Further APIs:
+
+1. int dmaengine_terminate_all(struct dma_chan *chan)
+
+ This causes all activity for the DMA channel to be stopped, and may
+ discard data in the DMA FIFO which hasn't been fully transferred.
+ No callback functions will be called for any incomplete transfers.
+
+2. int dmaengine_pause(struct dma_chan *chan)
+
+ This pauses activity on the DMA channel without data loss.
+
+3. int dmaengine_resume(struct dma_chan *chan)
+
+ Resume a previously paused DMA channel. It is invalid to resume a
+ channel which is not currently paused.
+
+4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+
+ This can be used to check the status of the channel. Please see
+ the documentation in include/linux/dmaengine.h for a more complete
+ description of this API.
+
+ This can be used in conjunction with dma_async_is_complete() and
+ the cookie returned from 'descriptor->submit()' to check for
+ completion of a specific DMA transaction.
+
+ Note:
+ Not all DMA engine drivers can return reliable information for
+ a running DMA channel. It is recommended that DMA engine users
+ pause or stop (via dmaengine_terminate_all) the channel before
+ using this API.
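+
+ As a sketch, assuming 'cookie' was returned by dmaengine_submit() for
+ the transaction of interest, checking for its completion might look
+ like:
+
+	dma_cookie_t last, used;
+	enum dma_status status;
+
+	status = dma_async_is_tx_complete(chan, cookie, &last, &used);
+	/* 'status' is DMA_SUCCESS, DMA_IN_PROGRESS or DMA_ERROR */
+
+	if (dma_async_is_complete(cookie, last, used) == DMA_SUCCESS)
+		/* the transaction identified by 'cookie' has finished */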
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 7be15e44d48..82a5d250d75 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -143,8 +143,7 @@ o provide a way to configure fault attributes
failslab, fail_page_alloc, and fail_make_request use this way.
Helper functions:
- init_fault_attr_dentries(entries, attr, name);
- void cleanup_fault_attr_dentries(entries);
+ fault_create_debugfs_attr(name, parent, attr);
- module parameters
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ea0bace0124..c4a6e148732 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -296,15 +296,6 @@ Who: Ravikiran Thirumalai <kiran@scalex86.org>
---------------------------
-What: CONFIG_THERMAL_HWMON
-When: January 2009
-Why: This option was introduced just to allow older lm-sensors userspace
- to keep working over the upgrade to 2.6.26. At the scheduled time of
- removal fixed lm-sensors (2.x or 3.x) should be readily available.
-Who: Rene Herman <rene.herman@gmail.com>
-
----------------------------
-
What: Code that is now under CONFIG_WIRELESS_EXT_SYSFS
(in net/core/net-sysfs.c)
When: After the only user (hal) has seen a release with the patches
@@ -590,3 +581,14 @@ Why: This driver has been superseded by g_mass_storage.
Who: Alan Stern <stern@rowland.harvard.edu>
----------------------------
+
+What: threeg and interface sysfs files in /sys/devices/platform/acer-wmi
+When: 2012
+Why: In 3.0, we can now autodetect the internal 3G device and already
+ have the threeg rfkill device. So, we plan to remove the threeg sysfs
+ support since it is no longer necessary.
+
+ We also plan to remove the interface sysfs file that exposed which
+ ACPI-WMI interface was used by the acer-wmi driver. It will be replaced
+ by an informational log message when acer-wmi initializes.
+Who: Lee, Chun-Yi <jlee@novell.com>
diff --git a/Documentation/frv/booting.txt b/Documentation/frv/booting.txt
index ace200b7c21..37c4d84a0e5 100644
--- a/Documentation/frv/booting.txt
+++ b/Documentation/frv/booting.txt
@@ -106,13 +106,20 @@ separated by spaces:
To use the first on-chip serial port at baud rate 115200, no parity, 8
bits, and no flow control.
- (*) root=/dev/<xxxx>
+ (*) root=<xxxx>
- This specifies the device upon which the root filesystem resides. For
- example:
+ This specifies the device upon which the root filesystem resides. It
+ may be specified by major and minor number, device path, or even
+ partition UUID, if supported. For example:
/dev/nfs NFS root filesystem
/dev/mtdblock3 Fourth RedBoot partition on the System Flash
+ PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF/PARTNROFF=1
+ first partition after the partition with the given UUID
+ 253:0 Device with major 253 and minor 0
+
+ Authoritative information can be found in
+ "Documentation/kernel-parameters.txt".
(*) rw
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 72ba8d51dbc..845a191004b 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -292,6 +292,7 @@ Code Seq#(hex) Include File Comments
<mailto:buk@buks.ipn.de>
0xA0 all linux/sdp/sdp.h Industrial Device Project
<mailto:kenji@bitgate.com>
+0xA2 00-0F arch/tile/include/asm/hardwall.h
0xA3 80-8F Port ACL in development:
<mailto:tlewis@mindspring.com>
0xA3 90-9F linux/dtlk.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4ca93898fbd..e279b724291 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -163,6 +163,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
See also Documentation/power/pm.txt, pci=noacpi
+ acpi_rsdp= [ACPI,EFI,KEXEC]
+ Pass the RSDP address to the kernel, mostly used
+ on machines running EFI runtime service to boot the
+ second kernel for kdump.
+
acpi_apic_instance= [ACPI, IOAPIC]
Format: <int>
2: use 2nd APIC table, if available
@@ -546,6 +551,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
/proc/<pid>/coredump_filter.
See also Documentation/filesystems/proc.txt.
+ cpuidle.off=1 [CPU_IDLE]
+ disable the cpuidle sub-system
+
cpcihp_generic= [HW,PCI] Generic port I/O CompactPCI driver
Format:
<first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
@@ -2153,6 +2161,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
[HW,MOUSE] Controls Logitech smartscroll autorepeat.
0 = disabled, 1 = enabled (default).
+ pstore.backend= Specify the name of the pstore backend to use
+
pt. [PARIDE]
See Documentation/blockdev/paride.txt.
@@ -2238,6 +2248,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
ro [KNL] Mount root device read-only on boot
root= [KNL] Root filesystem
+ See name_to_dev_t comment in init/do_mounts.c.
rootdelay= [KNL] Delay (in seconds) to pause before attempting to
mount the root filesystem
diff --git a/Documentation/m68k/kernel-options.txt b/Documentation/m68k/kernel-options.txt
index c93bed66e25..97d45f276fe 100644
--- a/Documentation/m68k/kernel-options.txt
+++ b/Documentation/m68k/kernel-options.txt
@@ -129,6 +129,20 @@ decimal 11 is the major of SCSI CD-ROMs, and the minor 0 stands for
the first of these. You can find out all valid major numbers by
looking into include/linux/major.h.
+In addition to major and minor numbers, if the device containing your
+root partition uses a partition table format with unique partition
+identifiers, then you may use them. For instance,
+"root=PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF". It is also
+possible to reference another partition on the same device using a
+known partition UUID as the starting point. For example,
+if partition 5 of the device has the UUID of
+00112233-4455-6677-8899-AABBCCDDEEFF then partition 3 may be found as
+follows:
+ PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF/PARTNROFF=-2
+
+Authoritative information can be found in
+"Documentation/kernel-parameters.txt".
+
2.2) ro, rw
-----------
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 675612ff41a..5dd960d7517 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -599,7 +599,7 @@ num_unsol_na
affect only the active-backup mode. These options were added for
bonding versions 3.3.0 and 3.4.0 respectively.
- From Linux 2.6.40 and bonding version 3.7.1, these notifications
+ From Linux 3.0 and bonding version 3.7.1, these notifications
are generated by the ipv4 and ipv6 code and the numbers of
repetitions cannot be set independently.
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 14dd3c6ad97..4ce5450ab6e 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -54,11 +54,10 @@ referred to as subsystem-level callbacks in what follows.
By default, the callbacks are always invoked in process context with interrupts
enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
-callbacks should be invoked in atomic context with interrupts disabled
-(->runtime_idle() is still invoked the default way). This implies that these
-callback routines must not block or sleep, but it also means that the
-synchronous helper functions listed at the end of Section 4 can be used within
-an interrupt handler or in an atomic context.
+callbacks should be invoked in atomic context with interrupts disabled.
+This implies that these callback routines must not block or sleep, but it also
+means that the synchronous helper functions listed at the end of Section 4 can
+be used within an interrupt handler or in an atomic context.
The subsystem-level suspend callback is _entirely_ _responsible_ for handling
the suspend of the device as appropriate, which may, but need not include
@@ -483,6 +482,7 @@ pm_runtime_suspend()
pm_runtime_autosuspend()
pm_runtime_resume()
pm_runtime_get_sync()
+pm_runtime_put_sync()
pm_runtime_put_sync_suspend()
5. Runtime PM Initialization, Device Probing and Removal
diff --git a/MAINTAINERS b/MAINTAINERS
index cf5e8a79309..51d42fbc8dc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1925,6 +1925,12 @@ S: Maintained
F: arch/x86/kernel/cpuid.c
F: arch/x86/kernel/msr.c
+CPU POWER MONITORING SUBSYSTEM
+M: Dominik Brodowski <linux@dominikbrodowski.net>
+M: Thomas Renninger <trenn@suse.de>
+S: Maintained
+F: tools/power/cpupower
+
CPUSETS
M: Paul Menage <menage@google.com>
W: http://www.bullopensource.org/cpuset/
@@ -2637,9 +2643,8 @@ S: Maintained
F: arch/x86/math-emu/
FRAME RELAY DLCI/FRAD (Sangoma drivers too)
-M: Mike McLagan <mike.mclagan@linux.org>
L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c
@@ -3361,6 +3366,12 @@ F: drivers/net/ixgb/
F: drivers/net/ixgbe/
F: drivers/net/ixgbevf/
+INTEL MRST PMU DRIVER
+M: Len Brown <len.brown@intel.com>
+L: linux-pm@lists.linux-foundation.org
+S: Supported
+F: arch/x86/platform/mrst/pmu.*
+
INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
L: linux-wireless@vger.kernel.org
S: Orphan
@@ -4403,10 +4414,10 @@ F: net/*/netfilter/
F: net/netfilter/
NETLABEL
-M: Paul Moore <paul.moore@hp.com>
+M: Paul Moore <paul@paul-moore.com>
W: http://netlabel.sf.net
L: netdev@vger.kernel.org
-S: Supported
+S: Maintained
F: Documentation/netlabel/
F: include/net/netlabel.h
F: net/netlabel/
@@ -4451,7 +4462,6 @@ F: include/linux/netdevice.h
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
-M: "Pekka Savola (ipv6)" <pekkas@netcore.fi>
M: James Morris <jmorris@namei.org>
M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
M: Patrick McHardy <kaber@trash.net>
@@ -4464,7 +4474,7 @@ F: include/net/ip*
F: arch/x86/net/*
NETWORKING [LABELED] (NetLabel, CIPSO, Labeled IPsec, SECMARK)
-M: Paul Moore <paul.moore@hp.com>
+M: Paul Moore <paul@paul-moore.com>
L: netdev@vger.kernel.org
S: Maintained
@@ -4716,6 +4726,7 @@ S: Maintained
F: drivers/of
F: include/linux/of*.h
K: of_get_property
+K: of_match_table
OPENRISC ARCHITECTURE
M: Jonas Bonn <jonas@southpole.se>
@@ -6312,6 +6323,7 @@ F: include/linux/sysv_fs.h
TARGET SUBSYSTEM
M: Nicholas A. Bellinger <nab@linux-iscsi.org>
L: linux-scsi@vger.kernel.org
+L: target-devel@vger.kernel.org
L: http://groups.google.com/group/linux-iscsi-target-dev
W: http://www.linux-iscsi.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core-2.6.git master
diff --git a/Makefile b/Makefile
index f676d15cd34..b4ca4e111c9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
-PATCHLEVEL = 0
+PATCHLEVEL = 1
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Sneaky Weasel
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 26b0e2397a5..4b0669cbb3b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -178,4 +178,7 @@ config HAVE_ARCH_MUTEX_CPU_RELAX
config HAVE_RCU_TABLE_FREE
bool
+config ARCH_HAVE_NMI_SAFE_CMPXCHG
+ bool
+
source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index ca2da8da6e9..60cde53d266 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -14,6 +14,7 @@ config ALPHA
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index acca35aebe2..aeef960ff79 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -112,9 +112,6 @@ EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
#endif
- /* crypto hash */
-EXPORT_SYMBOL(sha_transform);
-
/* gcc lib functions */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 5e1e5419722..1a347f481e5 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -30,6 +30,7 @@
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
+#include <linux/cpuidle.h>
#include <asm/cacheflush.h>
#include <asm/leds.h>
@@ -196,7 +197,8 @@ void cpu_idle(void)
cpu_relax();
} else {
stop_critical_timings();
- pm_idle();
+ if (cpuidle_idle_call())
+ pm_idle();
start_critical_timings();
/*
* This will eventually be removed - pm_idle
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 59ff42ddf0a..cf73a7f742d 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
strchr.o strrchr.o \
testchangebit.o testclearbit.o testsetbit.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
- ucmpdi2.o lib1funcs.o div64.o sha1.o \
+ ucmpdi2.o lib1funcs.o div64.o \
io-readsb.o io-writesb.o io-readsl.o io-writesl.o
mmu-y := clear_user.o copy_page.o getuser.o putuser.o
diff --git a/arch/arm/lib/sha1.S b/arch/arm/lib/sha1.S
deleted file mode 100644
index eb0edb80d7b..00000000000
--- a/arch/arm/lib/sha1.S
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * linux/arch/arm/lib/sha1.S
- *
- * SHA transform optimized for ARM
- *
- * Copyright: (C) 2005 by Nicolas Pitre <nico@fluxnic.net>
- * Created: September 17, 2005
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * The reference implementation for this code is linux/lib/sha1.c
- */
-
-#include <linux/linkage.h>
-
- .text
-
-
-/*
- * void sha_transform(__u32 *digest, const char *in, __u32 *W)
- *
- * Note: the "in" ptr may be unaligned.
- */
-
-ENTRY(sha_transform)
-
- stmfd sp!, {r4 - r8, lr}
-
- @ for (i = 0; i < 16; i++)
- @ W[i] = be32_to_cpu(in[i]);
-
-#ifdef __ARMEB__
- mov r4, r0
- mov r0, r2
- mov r2, #64
- bl memcpy
- mov r2, r0
- mov r0, r4
-#else
- mov r3, r2
- mov lr, #16
-1: ldrb r4, [r1], #1
- ldrb r5, [r1], #1
- ldrb r6, [r1], #1
- ldrb r7, [r1], #1
- subs lr, lr, #1
- orr r5, r5, r4, lsl #8
- orr r6, r6, r5, lsl #8
- orr r7, r7, r6, lsl #8
- str r7, [r3], #4
- bne 1b
-#endif
-
- @ for (i = 0; i < 64; i++)
- @ W[i+16] = ror(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 31);
-
- sub r3, r2, #4
- mov lr, #64
-2: ldr r4, [r3, #4]!
- subs lr, lr, #1
- ldr r5, [r3, #8]
- ldr r6, [r3, #32]
- ldr r7, [r3, #52]
- eor r4, r4, r5
- eor r4, r4, r6
- eor r4, r4, r7
- mov r4, r4, ror #31
- str r4, [r3, #64]
- bne 2b
-
- /*
- * The SHA functions are:
- *
- * f1(B,C,D) = (D ^ (B & (C ^ D)))
- * f2(B,C,D) = (B ^ C ^ D)
- * f3(B,C,D) = ((B & C) | (D & (B | C)))
- *
- * Then the sub-blocks are processed as follows:
- *
- * A' = ror(A, 27) + f(B,C,D) + E + K + *W++
- * B' = A
- * C' = ror(B, 2)
- * D' = C
- * E' = D
- *
- * We therefore unroll each loop 5 times to avoid register shuffling.
- * Also the ror for C (and also D and E which are successivelyderived
- * from it) is applied in place to cut on an additional mov insn for
- * each round.
- */
-
- .macro sha_f1, A, B, C, D, E
- ldr r3, [r2], #4
- eor ip, \C, \D
- add \E, r1, \E, ror #2
- and ip, \B, ip, ror #2
- add \E, \E, \A, ror #27
- eor ip, ip, \D, ror #2
- add \E, \E, r3
- add \E, \E, ip
- .endm
-
- .macro sha_f2, A, B, C, D, E
- ldr r3, [r2], #4
- add \E, r1, \E, ror #2
- eor ip, \B, \C, ror #2
- add \E, \E, \A, ror #27
- eor ip, ip, \D, ror #2
- add \E, \E, r3
- add \E, \E, ip
- .endm
-
- .macro sha_f3, A, B, C, D, E
- ldr r3, [r2], #4
- add \E, r1, \E, ror #2
- orr ip, \B, \C, ror #2
- add \E, \E, \A, ror #27
- and ip, ip, \D, ror #2
- add \E, \E, r3
- and r3, \B, \C, ror #2
- orr ip, ip, r3
- add \E, \E, ip
- .endm
-
- ldmia r0, {r4 - r8}
-
- mov lr, #4
- ldr r1, .L_sha_K + 0
-
- /* adjust initial values */
- mov r6, r6, ror #30
- mov r7, r7, ror #30
- mov r8, r8, ror #30
-
-3: subs lr, lr, #1
- sha_f1 r4, r5, r6, r7, r8
- sha_f1 r8, r4, r5, r6, r7
- sha_f1 r7, r8, r4, r5, r6
- sha_f1 r6, r7, r8, r4, r5
- sha_f1 r5, r6, r7, r8, r4
- bne 3b
-
- ldr r1, .L_sha_K + 4
- mov lr, #4
-
-4: subs lr, lr, #1
- sha_f2 r4, r5, r6, r7, r8
- sha_f2 r8, r4, r5, r6, r7
- sha_f2 r7, r8, r4, r5, r6
- sha_f2 r6, r7, r8, r4, r5
- sha_f2 r5, r6, r7, r8, r4
- bne 4b
-
- ldr r1, .L_sha_K + 8
- mov lr, #4
-
-5: subs lr, lr, #1
- sha_f3 r4, r5, r6, r7, r8
- sha_f3 r8, r4, r5, r6, r7
- sha_f3 r7, r8, r4, r5, r6
- sha_f3 r6, r7, r8, r4, r5
- sha_f3 r5, r6, r7, r8, r4
- bne 5b
-
- ldr r1, .L_sha_K + 12
- mov lr, #4
-
-6: subs lr, lr, #1
- sha_f2 r4, r5, r6, r7, r8
- sha_f2 r8, r4, r5, r6, r7
- sha_f2 r7, r8, r4, r5, r6
- sha_f2 r6, r7, r8, r4, r5
- sha_f2 r5, r6, r7, r8, r4
- bne 6b
-
- ldmia r0, {r1, r2, r3, ip, lr}
- add r4, r1, r4
- add r5, r2, r5
- add r6, r3, r6, ror #2
- add r7, ip, r7, ror #2
- add r8, lr, r8, ror #2
- stmia r0, {r4 - r8}
-
- ldmfd sp!, {r4 - r8, pc}
-
-ENDPROC(sha_transform)
-
- .align 2
-.L_sha_K:
- .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
-
-
-/*
- * void sha_init(__u32 *buf)
- */
-
- .align 2
-.L_sha_initial_digest:
- .word 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0
-
-ENTRY(sha_init)
-
- str lr, [sp, #-4]!
- adr r1, .L_sha_initial_digest
- ldmia r1, {r1, r2, r3, ip, lr}
- stmia r0, {r1, r2, r3, ip, lr}
- ldr pc, [sp], #4
-
-ENDPROC(sha_init)
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 888e92502e1..ebde97f5d5f 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -11,6 +11,7 @@ config ARCH_MSM7X00A
select MSM_SMD
select MSM_SMD_PKG3
select CPU_V6
+ select GPIO_MSM_V1
select MSM_PROC_COMM
select HAS_MSM_DEBUG_UART_PHYS
@@ -22,6 +23,7 @@ config ARCH_MSM7X30
select MSM_VIC
select CPU_V7
select MSM_GPIOMUX
+ select GPIO_MSM_V1
select MSM_PROC_COMM
select HAS_MSM_DEBUG_UART_PHYS
@@ -33,6 +35,7 @@ config ARCH_QSD8X50
select MSM_VIC
select CPU_V7
select MSM_GPIOMUX
+ select GPIO_MSM_V1
select MSM_PROC_COMM
select HAS_MSM_DEBUG_UART_PHYS
@@ -44,6 +47,7 @@ config ARCH_MSM8X60
select ARM_GIC
select CPU_V7
select MSM_V2_TLMM
+ select GPIO_MSM_V2
select MSM_GPIOMUX
select MSM_SCM if SMP
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index b70658c5ae0..4285dfd80b6 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -29,11 +29,3 @@ obj-$(CONFIG_ARCH_MSM8960) += board-msm8960.o devices-msm8960.o
obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-v1.o gpiomux.o
obj-$(CONFIG_ARCH_QSD8X50) += gpiomux-8x50.o gpiomux-v1.o gpiomux.o
obj-$(CONFIG_ARCH_MSM8X60) += gpiomux-8x60.o gpiomux-v2.o gpiomux.o
-ifdef CONFIG_MSM_V2_TLMM
-ifndef CONFIG_ARCH_MSM8960
-# TODO: TLMM Mapping issues need to be resolved
-obj-y += gpio-v2.o
-endif
-else
-obj-y += gpio.o
-endif
diff --git a/arch/arm/mach-msm/gpio.c b/arch/arm/mach-msm/gpio.c
deleted file mode 100644
index 5ea273b00da..00000000000
--- a/arch/arm/mach-msm/gpio.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/* linux/arch/arm/mach-msm/gpio.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/bitops.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include "gpio_hw.h"
-#include "gpiomux.h"
-
-#define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0)
-
-#define MSM_GPIO_BANK(bank, first, last) \
- { \
- .regs = { \
- .out = MSM_GPIO_OUT_##bank, \
- .in = MSM_GPIO_IN_##bank, \
- .int_status = MSM_GPIO_INT_STATUS_##bank, \
- .int_clear = MSM_GPIO_INT_CLEAR_##bank, \
- .int_en = MSM_GPIO_INT_EN_##bank, \
- .int_edge = MSM_GPIO_INT_EDGE_##bank, \
- .int_pos = MSM_GPIO_INT_POS_##bank, \
- .oe = MSM_GPIO_OE_##bank, \
- }, \
- .chip = { \
- .base = (first), \
- .ngpio = (last) - (first) + 1, \
- .get = msm_gpio_get, \
- .set = msm_gpio_set, \
- .direction_input = msm_gpio_direction_input, \
- .direction_output = msm_gpio_direction_output, \
- .to_irq = msm_gpio_to_irq, \
- .request = msm_gpio_request, \
- .free = msm_gpio_free, \
- } \
- }
-
-#define MSM_GPIO_BROKEN_INT_CLEAR 1
-
-struct msm_gpio_regs {
- void __iomem *out;
- void __iomem *in;
- void __iomem *int_status;
- void __iomem *int_clear;
- void __iomem *int_en;
- void __iomem *int_edge;
- void __iomem *int_pos;
- void __iomem *oe;
-};
-
-struct msm_gpio_chip {
- spinlock_t lock;
- struct gpio_chip chip;
- struct msm_gpio_regs regs;
-#if MSM_GPIO_BROKEN_INT_CLEAR
- unsigned int_status_copy;
-#endif
- unsigned int both_edge_detect;
- unsigned int int_enable[2]; /* 0: awake, 1: sleep */
-};
-
-static int msm_gpio_write(struct msm_gpio_chip *msm_chip,
- unsigned offset, unsigned on)
-{
- unsigned mask = BIT(offset);
- unsigned val;
-
- val = readl(msm_chip->regs.out);
- if (on)
- writel(val | mask, msm_chip->regs.out);
- else
- writel(val & ~mask, msm_chip->regs.out);
- return 0;
-}
-
-static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip)
-{
- int loop_limit = 100;
- unsigned pol, val, val2, intstat;
- do {
- val = readl(msm_chip->regs.in);
- pol = readl(msm_chip->regs.int_pos);
- pol = (pol & ~msm_chip->both_edge_detect) |
- (~val & msm_chip->both_edge_detect);
- writel(pol, msm_chip->regs.int_pos);
- intstat = readl(msm_chip->regs.int_status);
- val2 = readl(msm_chip->regs.in);
- if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0)
- return;
- } while (loop_limit-- > 0);
- printk(KERN_ERR "msm_gpio_update_both_edge_detect, "
- "failed to reach stable state %x != %x\n", val, val2);
-}
-
-static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip,
- unsigned offset)
-{
- unsigned bit = BIT(offset);
-
-#if MSM_GPIO_BROKEN_INT_CLEAR
- /* Save interrupts that already triggered before we loose them. */
- /* Any interrupt that triggers between the read of int_status */
- /* and the write to int_clear will still be lost though. */
- msm_chip->int_status_copy |= readl(msm_chip->regs.int_status);
- msm_chip->int_status_copy &= ~bit;
-#endif
- writel(bit, msm_chip->regs.int_clear);
- msm_gpio_update_both_edge_detect(msm_chip);
- return 0;
-}
-
-static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct msm_gpio_chip *msm_chip;
- unsigned long irq_flags;
-
- msm_chip = container_of(chip, struct msm_gpio_chip, chip);
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- writel(readl(msm_chip->regs.oe) & ~BIT(offset), msm_chip->regs.oe);
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
- return 0;
-}
-
-static int
-msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct msm_gpio_chip *msm_chip;
- unsigned long irq_flags;
-
- msm_chip = container_of(chip, struct msm_gpio_chip, chip);
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- msm_gpio_write(msm_chip, offset, value);
- writel(readl(msm_chip->regs.oe) | BIT(offset), msm_chip->regs.oe);
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
- return 0;
-}
-
-static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct msm_gpio_chip *msm_chip;
-
- msm_chip = container_of(chip, struct msm_gpio_chip, chip);
- return (readl(msm_chip->regs.in) & (1U << offset)) ? 1 : 0;
-}
-
-static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct msm_gpio_chip *msm_chip;
- unsigned long irq_flags;
-
- msm_chip = container_of(chip, struct msm_gpio_chip, chip);
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- msm_gpio_write(msm_chip, offset, value);
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
-}
-
-static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- return MSM_GPIO_TO_INT(chip->base + offset);
-}
-
-#ifdef CONFIG_MSM_GPIOMUX
-static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- return msm_gpiomux_get(chip->base + offset);
-}
-
-static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- msm_gpiomux_put(chip->base + offset);
-}
-#else
-#define msm_gpio_request NULL
-#define msm_gpio_free NULL
-#endif
-
-struct msm_gpio_chip msm_gpio_chips[] = {
-#if defined(CONFIG_ARCH_MSM7X00A)
- MSM_GPIO_BANK(0, 0, 15),
- MSM_GPIO_BANK(1, 16, 42),
- MSM_GPIO_BANK(2, 43, 67),
- MSM_GPIO_BANK(3, 68, 94),
- MSM_GPIO_BANK(4, 95, 106),
- MSM_GPIO_BANK(5, 107, 121),
-#elif defined(CONFIG_ARCH_MSM7X25) || defined(CONFIG_ARCH_MSM7X27)
- MSM_GPIO_BANK(0, 0, 15),
- MSM_GPIO_BANK(1, 16, 42),
- MSM_GPIO_BANK(2, 43, 67),
- MSM_GPIO_BANK(3, 68, 94),
- MSM_GPIO_BANK(4, 95, 106),
- MSM_GPIO_BANK(5, 107, 132),
-#elif defined(CONFIG_ARCH_MSM7X30)
- MSM_GPIO_BANK(0, 0, 15),
- MSM_GPIO_BANK(1, 16, 43),
- MSM_GPIO_BANK(2, 44, 67),
- MSM_GPIO_BANK(3, 68, 94),
- MSM_GPIO_BANK(4, 95, 106),
- MSM_GPIO_BANK(5, 107, 133),
- MSM_GPIO_BANK(6, 134, 150),
- MSM_GPIO_BANK(7, 151, 181),
-#elif defined(CONFIG_ARCH_QSD8X50)
- MSM_GPIO_BANK(0, 0, 15),
- MSM_GPIO_BANK(1, 16, 42),
- MSM_GPIO_BANK(2, 43, 67),
- MSM_GPIO_BANK(3, 68, 94),
- MSM_GPIO_BANK(4, 95, 103),
- MSM_GPIO_BANK(5, 104, 121),
- MSM_GPIO_BANK(6, 122, 152),
- MSM_GPIO_BANK(7, 153, 164),
-#endif
-};
-
-static void msm_gpio_irq_ack(struct irq_data *d)
-{
- unsigned long irq_flags;
- struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- msm_gpio_clear_detect_status(msm_chip,
- d->irq - gpio_to_irq(msm_chip->chip.base));
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
-}
-
-static void msm_gpio_irq_mask(struct irq_data *d)
-{
- unsigned long irq_flags;
- struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
- unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
-
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- /* level triggered interrupts are also latched */
- if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
- msm_gpio_clear_detect_status(msm_chip, offset);
- msm_chip->int_enable[0] &= ~BIT(offset);
- writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
-}
-
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
- unsigned long irq_flags;
- struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
- unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
-
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- /* level triggered interrupts are also latched */
- if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
- msm_gpio_clear_detect_status(msm_chip, offset);
- msm_chip->int_enable[0] |= BIT(offset);
- writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
-}
-
-static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
-{
- unsigned long irq_flags;
- struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
- unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
-
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
-
- if (on)
- msm_chip->int_enable[1] |= BIT(offset);
- else
- msm_chip->int_enable[1] &= ~BIT(offset);
-
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
- return 0;
-}
-
-static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
- unsigned long irq_flags;
- struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
- unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
- unsigned val, mask = BIT(offset);
-
- spin_lock_irqsave(&msm_chip->lock, irq_flags);
- val = readl(msm_chip->regs.int_edge);
- if (flow_type & IRQ_TYPE_EDGE_BOTH) {
- writel(val | mask, msm_chip->regs.int_edge);
- __irq_set_handler_locked(d->irq, handle_edge_irq);
- } else {
- writel(val & ~mask, msm_chip->regs.int_edge);
- __irq_set_handler_locked(d->irq, handle_level_irq);
- }
- if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
- msm_chip->both_edge_detect |= mask;
- msm_gpio_update_both_edge_detect(msm_chip);
- } else {
- msm_chip->both_edge_detect &= ~mask;
- val = readl(msm_chip->regs.int_pos);
- if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
- writel(val | mask, msm_chip->regs.int_pos);
- else
- writel(val & ~mask, msm_chip->regs.int_pos);
- }
- spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
- return 0;
-}
-
-static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- int i, j, mask;
- unsigned val;
-
- for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
- struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
- val = readl(msm_chip->regs.int_status);
- val &= msm_chip->int_enable[0];
- while (val) {
- mask = val & -val;
- j = fls(mask) - 1;
- /* printk("%s %08x %08x bit %d gpio %d irq %d\n",
- __func__, v, m, j, msm_chip->chip.start + j,
- FIRST_GPIO_IRQ + msm_chip->chip.start + j); */
- val &= ~mask;
- generic_handle_irq(FIRST_GPIO_IRQ +
- msm_chip->chip.base + j);
- }
- }
- desc->irq_data.chip->irq_ack(&desc->irq_data);
-}
-
-static struct irq_chip msm_gpio_irq_chip = {
- .name = "msmgpio",
- .irq_ack = msm_gpio_irq_ack,
- .irq_mask = msm_gpio_irq_mask,
- .irq_unmask = msm_gpio_irq_unmask,
- .irq_set_wake = msm_gpio_irq_set_wake,
- .irq_set_type = msm_gpio_irq_set_type,
-};
-
-static int __init msm_init_gpio(void)
-{
- int i, j = 0;
-
- for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
- if (i - FIRST_GPIO_IRQ >=
- msm_gpio_chips[j].chip.base +
- msm_gpio_chips[j].chip.ngpio)
- j++;
- irq_set_chip_data(i, &msm_gpio_chips[j]);
- irq_set_chip_and_handler(i, &msm_gpio_irq_chip,
- handle_edge_irq);
- set_irq_flags(i, IRQF_VALID);
- }
-
- for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
- spin_lock_init(&msm_gpio_chips[i].lock);
- writel(0, msm_gpio_chips[i].regs.int_en);
- gpiochip_add(&msm_gpio_chips[i].chip);
- }
-
- irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
- irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
- irq_set_irq_wake(INT_GPIO_GROUP1, 1);
- irq_set_irq_wake(INT_GPIO_GROUP2, 2);
- return 0;
-}
-
-postcore_initcall(msm_init_gpio);
diff --git a/arch/arm/mach-msm/gpio_hw.h b/arch/arm/mach-msm/gpio_hw.h
deleted file mode 100644
index 6b5066038ba..00000000000
--- a/arch/arm/mach-msm/gpio_hw.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/* arch/arm/mach-msm/gpio_hw.h
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ARCH_ARM_MACH_MSM_GPIO_HW_H
-#define __ARCH_ARM_MACH_MSM_GPIO_HW_H
-
-#include <mach/msm_iomap.h>
-
-/* see 80-VA736-2 Rev C pp 695-751
-**
-** These are actually the *shadow* gpio registers, since the
-** real ones (which allow full access) are only available to the
-** ARM9 side of the world.
-**
-** Since the _BASE need to be page-aligned when we're mapping them
-** to virtual addresses, adjust for the additional offset in these
-** macros.
-*/
-
-#if defined(CONFIG_ARCH_MSM7X30)
-#define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + (off))
-#define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off))
-#else
-#define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + 0x800 + (off))
-#define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off))
-#endif
-
-#if defined(CONFIG_ARCH_MSM7X00A) || defined(CONFIG_ARCH_MSM7X25) ||\
- defined(CONFIG_ARCH_MSM7X27)
-
-/* output value */
-#define MSM_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
-#define MSM_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 42-16 */
-#define MSM_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-43 */
-#define MSM_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
-#define MSM_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */
-#define MSM_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 107-121 */
-
-/* same pin map as above, output enable */
-#define MSM_GPIO_OE_0 MSM_GPIO1_REG(0x10)
-#define MSM_GPIO_OE_1 MSM_GPIO2_REG(0x08)
-#define MSM_GPIO_OE_2 MSM_GPIO1_REG(0x14)
-#define MSM_GPIO_OE_3 MSM_GPIO1_REG(0x18)
-#define MSM_GPIO_OE_4 MSM_GPIO1_REG(0x1C)
-#define MSM_GPIO_OE_5 MSM_GPIO1_REG(0x54)
-
-/* same pin map as above, input read */
-#define MSM_GPIO_IN_0 MSM_GPIO1_REG(0x34)
-#define MSM_GPIO_IN_1 MSM_GPIO2_REG(0x20)
-#define MSM_GPIO_IN_2 MSM_GPIO1_REG(0x38)
-#define MSM_GPIO_IN_3 MSM_GPIO1_REG(0x3C)
-#define MSM_GPIO_IN_4 MSM_GPIO1_REG(0x40)
-#define MSM_GPIO_IN_5 MSM_GPIO1_REG(0x44)
-
-/* same pin map as above, 1=edge 0=level interrup */
-#define MSM_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60)
-#define MSM_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
-#define MSM_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64)
-#define MSM_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68)
-#define MSM_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C)
-#define MSM_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0)
-
-/* same pin map as above, 1=positive 0=negative */
-#define MSM_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70)
-#define MSM_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
-#define MSM_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74)
-#define MSM_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78)
-#define MSM_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C)
-#define MSM_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC)
-
-/* same pin map as above, interrupt enable */
-#define MSM_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80)
-#define MSM_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
-#define MSM_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84)
-#define MSM_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88)
-#define MSM_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C)
-#define MSM_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8)
-
-/* same pin map as above, write 1 to clear interrupt */
-#define MSM_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90)
-#define MSM_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
-#define MSM_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94)
-#define MSM_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98)
-#define MSM_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C)
-#define MSM_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4)
-
-/* same pin map as above, 1=interrupt pending */
-#define MSM_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0)
-#define MSM_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
-#define MSM_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4)
-#define MSM_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8)
-#define MSM_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC)
-#define MSM_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0)
-
-#endif
-
-#if defined(CONFIG_ARCH_QSD8X50)
-/* output value */
-#define MSM_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
-#define MSM_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 42-16 */
-#define MSM_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-43 */
-#define MSM_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
-#define MSM_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 103-95 */
-#define MSM_GPIO_OUT_5 MSM_GPIO1_REG(0x10) /* gpio 121-104 */
-#define MSM_GPIO_OUT_6 MSM_GPIO1_REG(0x14) /* gpio 152-122 */
-#define MSM_GPIO_OUT_7 MSM_GPIO1_REG(0x18) /* gpio 164-153 */
-
-/* same pin map as above, output enable */
-#define MSM_GPIO_OE_0 MSM_GPIO1_REG(0x20)
-#define MSM_GPIO_OE_1 MSM_GPIO2_REG(0x08)
-#define MSM_GPIO_OE_2 MSM_GPIO1_REG(0x24)
-#define MSM_GPIO_OE_3 MSM_GPIO1_REG(0x28)
-#define MSM_GPIO_OE_4 MSM_GPIO1_REG(0x2C)
-#define MSM_GPIO_OE_5 MSM_GPIO1_REG(0x30)
-#define MSM_GPIO_OE_6 MSM_GPIO1_REG(0x34)
-#define MSM_GPIO_OE_7 MSM_GPIO1_REG(0x38)
-
-/* same pin map as above, input read */
-#define MSM_GPIO_IN_0 MSM_GPIO1_REG(0x50)
-#define MSM_GPIO_IN_1 MSM_GPIO2_REG(0x20)
-#define MSM_GPIO_IN_2 MSM_GPIO1_REG(0x54)
-#define MSM_GPIO_IN_3 MSM_GPIO1_REG(0x58)
-#define MSM_GPIO_IN_4 MSM_GPIO1_REG(0x5C)
-#define MSM_GPIO_IN_5 MSM_GPIO1_REG(0x60)
-#define MSM_GPIO_IN_6 MSM_GPIO1_REG(0x64)
-#define MSM_GPIO_IN_7 MSM_GPIO1_REG(0x68)
-
-/* same pin map as above, 1=edge 0=level interrup */
-#define MSM_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x70)
-#define MSM_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
-#define MSM_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x74)
-#define MSM_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x78)
-#define MSM_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x7C)
-#define MSM_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0x80)
-#define MSM_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0x84)
-#define MSM_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x88)
-
-/* same pin map as above, 1=positive 0=negative */
-#define MSM_GPIO_INT_POS_0 MSM_GPIO1_REG(0x90)
-#define MSM_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
-#define MSM_GPIO_INT_POS_2 MSM_GPIO1_REG(0x94)
-#define MSM_GPIO_INT_POS_3 MSM_GPIO1_REG(0x98)
-#define MSM_GPIO_INT_POS_4 MSM_GPIO1_REG(0x9C)
-#define MSM_GPIO_INT_POS_5 MSM_GPIO1_REG(0xA0)
-#define MSM_GPIO_INT_POS_6 MSM_GPIO1_REG(0xA4)
-#define MSM_GPIO_INT_POS_7 MSM_GPIO1_REG(0xA8)
-
-/* same pin map as above, interrupt enable */
-#define MSM_GPIO_INT_EN_0 MSM_GPIO1_REG(0xB0)
-#define MSM_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
-#define MSM_GPIO_INT_EN_2 MSM_GPIO1_REG(0xB4)
-#define MSM_GPIO_INT_EN_3 MSM_GPIO1_REG(0xB8)
-#define MSM_GPIO_INT_EN_4 MSM_GPIO1_REG(0xBC)
-#define MSM_GPIO_INT_EN_5 MSM_GPIO1_REG(0xC0)
-#define MSM_GPIO_INT_EN_6 MSM_GPIO1_REG(0xC4)
-#define MSM_GPIO_INT_EN_7 MSM_GPIO1_REG(0xC8)
-
-/* same pin map as above, write 1 to clear interrupt */
-#define MSM_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0xD0)
-#define MSM_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
-#define MSM_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0xD4)
-#define MSM_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0xD8)
-#define MSM_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0xDC)
-#define MSM_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xE0)
-#define MSM_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xE4)
-#define MSM_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0xE8)
-
-/* same pin map as above, 1=interrupt pending */
-#define MSM_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xF0)
-#define MSM_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
-#define MSM_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xF4)
-#define MSM_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xF8)
-#define MSM_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xFC)
-#define MSM_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0x100)
-#define MSM_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0x104)
-#define MSM_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x108)
-
-#endif
-
-#if defined(CONFIG_ARCH_MSM7X30)
-
-/* output value */
-#define MSM_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
-#define MSM_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 43-16 */
-#define MSM_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-44 */
-#define MSM_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
-#define MSM_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */
-#define MSM_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 133-107 */
-#define MSM_GPIO_OUT_6 MSM_GPIO1_REG(0xC4) /* gpio 150-134 */
-#define MSM_GPIO_OUT_7 MSM_GPIO1_REG(0x214) /* gpio 181-151 */
-
-/* same pin map as above, output enable */
-#define MSM_GPIO_OE_0 MSM_GPIO1_REG(0x10)
-#define MSM_GPIO_OE_1 MSM_GPIO2_REG(0x08)
-#define MSM_GPIO_OE_2 MSM_GPIO1_REG(0x14)
-#define MSM_GPIO_OE_3 MSM_GPIO1_REG(0x18)
-#define MSM_GPIO_OE_4 MSM_GPIO1_REG(0x1C)
-#define MSM_GPIO_OE_5 MSM_GPIO1_REG(0x54)
-#define MSM_GPIO_OE_6 MSM_GPIO1_REG(0xC8)
-#define MSM_GPIO_OE_7 MSM_GPIO1_REG(0x218)
-
-/* same pin map as above, input read */
-#define MSM_GPIO_IN_0 MSM_GPIO1_REG(0x34)
-#define MSM_GPIO_IN_1 MSM_GPIO2_REG(0x20)
-#define MSM_GPIO_IN_2 MSM_GPIO1_REG(0x38)
-#define MSM_GPIO_IN_3 MSM_GPIO1_REG(0x3C)
-#define MSM_GPIO_IN_4 MSM_GPIO1_REG(0x40)
-#define MSM_GPIO_IN_5 MSM_GPIO1_REG(0x44)
-#define MSM_GPIO_IN_6 MSM_GPIO1_REG(0xCC)
-#define MSM_GPIO_IN_7 MSM_GPIO1_REG(0x21C)
-
-/* same pin map as above, 1=edge 0=level interrup */
-#define MSM_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60)
-#define MSM_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
-#define MSM_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64)
-#define MSM_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68)
-#define MSM_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C)
-#define MSM_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0)
-#define MSM_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0xD0)
-#define MSM_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x240)
-
-/* same pin map as above, 1=positive 0=negative */
-#define MSM_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70)
-#define MSM_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
-#define MSM_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74)
-#define MSM_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78)
-#define MSM_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C)
-#define MSM_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC)
-#define MSM_GPIO_INT_POS_6 MSM_GPIO1_REG(0xD4)
-#define MSM_GPIO_INT_POS_7 MSM_GPIO1_REG(0x228)
-
-/* same pin map as above, interrupt enable */
-#define MSM_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80)
-#define MSM_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
-#define MSM_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84)
-#define MSM_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88)
-#define MSM_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C)
-#define MSM_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8)
-#define MSM_GPIO_INT_EN_6 MSM_GPIO1_REG(0xD8)
-#define MSM_GPIO_INT_EN_7 MSM_GPIO1_REG(0x22C)
-
-/* same pin map as above, write 1 to clear interrupt */
-#define MSM_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90)
-#define MSM_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
-#define MSM_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94)
-#define MSM_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98)
-#define MSM_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C)
-#define MSM_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4)
-#define MSM_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xDC)
-#define MSM_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0x230)
-
-/* same pin map as above, 1=interrupt pending */
-#define MSM_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0)
-#define MSM_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
-#define MSM_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4)
-#define MSM_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8)
-#define MSM_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC)
-#define MSM_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0)
-#define MSM_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0xE0)
-#define MSM_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x234)
-
-#endif
-
-#endif
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h
index b178d9cb742..00459f6ee13 100644
--- a/arch/arm/mach-msm/gpiomux.h
+++ b/arch/arm/mach-msm/gpiomux.h
@@ -19,6 +19,7 @@
#include <linux/bitops.h>
#include <linux/errno.h>
+#include <mach/msm_gpiomux.h>
#if defined(CONFIG_MSM_V2_TLMM)
#include "gpiomux-v2.h"
@@ -71,12 +72,6 @@ enum {
*/
extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
-/* Increment a gpio's reference count, possibly activating the line. */
-int __must_check msm_gpiomux_get(unsigned gpio);
-
-/* Decrement a gpio's reference count, possibly suspending the line. */
-int msm_gpiomux_put(unsigned gpio);
-
/* Install a new configuration to the gpio line. To avoid overwriting
* a configuration, leave the VALID bit out.
*/
@@ -94,16 +89,6 @@ int msm_gpiomux_write(unsigned gpio,
*/
void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
#else
-static inline int __must_check msm_gpiomux_get(unsigned gpio)
-{
- return -ENOSYS;
-}
-
-static inline int msm_gpiomux_put(unsigned gpio)
-{
- return -ENOSYS;
-}
-
static inline int msm_gpiomux_write(unsigned gpio,
gpiomux_config_t active,
gpiomux_config_t suspended)
diff --git a/arch/arm/mach-msm/include/mach/msm_gpiomux.h b/arch/arm/mach-msm/include/mach/msm_gpiomux.h
new file mode 100644
index 00000000000..0c7d3936e02
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_gpiomux.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_GPIOMUX_H
+#define _LINUX_MSM_GPIOMUX_H
+
+#ifdef CONFIG_MSM_GPIOMUX
+
+/* Increment a gpio's reference count, possibly activating the line. */
+int __must_check msm_gpiomux_get(unsigned gpio);
+
+/* Decrement a gpio's reference count, possibly suspending the line. */
+int msm_gpiomux_put(unsigned gpio);
+
+#else
+
+static inline int __must_check msm_gpiomux_get(unsigned gpio)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_gpiomux_put(unsigned gpio)
+{
+ return -ENOSYS;
+}
+
+#endif
+
+#endif /* _LINUX_MSM_GPIOMUX_H */
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
index 8f99d97615a..94fe9fe6feb 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
@@ -55,13 +55,11 @@
#define MSM_DMOV_PHYS 0xA9700000
#define MSM_DMOV_SIZE SZ_4K
-#define MSM_GPIO1_BASE IOMEM(0xE0003000)
-#define MSM_GPIO1_PHYS 0xA9200000
-#define MSM_GPIO1_SIZE SZ_4K
+#define MSM7X00_GPIO1_PHYS 0xA9200000
+#define MSM7X00_GPIO1_SIZE SZ_4K
-#define MSM_GPIO2_BASE IOMEM(0xE0004000)
-#define MSM_GPIO2_PHYS 0xA9300000
-#define MSM_GPIO2_SIZE SZ_4K
+#define MSM7X00_GPIO2_PHYS 0xA9300000
+#define MSM7X00_GPIO2_SIZE SZ_4K
#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
#define MSM_CLK_CTL_PHYS 0xA8600000
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
index 4d84be15955..37694442d1b 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
@@ -46,13 +46,11 @@
#define MSM_DMOV_PHYS 0xAC400000
#define MSM_DMOV_SIZE SZ_4K
-#define MSM_GPIO1_BASE IOMEM(0xE0003000)
-#define MSM_GPIO1_PHYS 0xAC001000
-#define MSM_GPIO1_SIZE SZ_4K
+#define MSM7X30_GPIO1_PHYS 0xAC001000
+#define MSM7X30_GPIO1_SIZE SZ_4K
-#define MSM_GPIO2_BASE IOMEM(0xE0004000)
-#define MSM_GPIO2_PHYS 0xAC101000
-#define MSM_GPIO2_SIZE SZ_4K
+#define MSM7X30_GPIO2_PHYS 0xAC101000
+#define MSM7X30_GPIO2_SIZE SZ_4K
#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
#define MSM_CLK_CTL_PHYS 0xAB800000
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
index d4143201999..d67cd73316f 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
@@ -46,13 +46,11 @@
#define MSM_DMOV_PHYS 0xA9700000
#define MSM_DMOV_SIZE SZ_4K
-#define MSM_GPIO1_BASE IOMEM(0xE0003000)
-#define MSM_GPIO1_PHYS 0xA9000000
-#define MSM_GPIO1_SIZE SZ_4K
+#define QSD8X50_GPIO1_PHYS 0xA9000000
+#define QSD8X50_GPIO1_SIZE SZ_4K
-#define MSM_GPIO2_BASE IOMEM(0xE0004000)
-#define MSM_GPIO2_PHYS 0xA9100000
-#define MSM_GPIO2_SIZE SZ_4K
+#define QSD8X50_GPIO2_PHYS 0xA9100000
+#define QSD8X50_GPIO2_SIZE SZ_4K
#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
#define MSM_CLK_CTL_PHYS 0xA8600000
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 2f494b6a9d0..4ded15238b6 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -61,5 +61,7 @@
#define MSM_QGIC_CPU_BASE IOMEM(0xF0001000)
#define MSM_TMR_BASE IOMEM(0xF0200000)
#define MSM_TMR0_BASE IOMEM(0xF0201000)
+#define MSM_GPIO1_BASE IOMEM(0xE0003000)
+#define MSM_GPIO2_BASE IOMEM(0xE0004000)
#endif
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index cec6ed1c91d..140ddbbc3a8 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -43,8 +43,8 @@ static struct map_desc msm_io_desc[] __initdata = {
MSM_DEVICE(VIC),
MSM_CHIP_DEVICE(CSR, MSM7X00),
MSM_DEVICE(DMOV),
- MSM_DEVICE(GPIO1),
- MSM_DEVICE(GPIO2),
+ MSM_CHIP_DEVICE(GPIO1, MSM7X00),
+ MSM_CHIP_DEVICE(GPIO2, MSM7X00),
MSM_DEVICE(CLK_CTL),
#ifdef CONFIG_MSM_DEBUG_UART
MSM_DEVICE(DEBUG_UART),
@@ -76,8 +76,8 @@ static struct map_desc qsd8x50_io_desc[] __initdata = {
MSM_DEVICE(VIC),
MSM_CHIP_DEVICE(CSR, QSD8X50),
MSM_DEVICE(DMOV),
- MSM_DEVICE(GPIO1),
- MSM_DEVICE(GPIO2),
+ MSM_CHIP_DEVICE(GPIO1, QSD8X50),
+ MSM_CHIP_DEVICE(GPIO2, QSD8X50),
MSM_DEVICE(CLK_CTL),
MSM_DEVICE(SIRC),
MSM_DEVICE(SCPLL),
@@ -135,8 +135,8 @@ static struct map_desc msm7x30_io_desc[] __initdata = {
MSM_DEVICE(VIC),
MSM_CHIP_DEVICE(CSR, MSM7X30),
MSM_DEVICE(DMOV),
- MSM_DEVICE(GPIO1),
- MSM_DEVICE(GPIO2),
+ MSM_CHIP_DEVICE(GPIO1, MSM7X30),
+ MSM_CHIP_DEVICE(GPIO2, MSM7X30),
MSM_DEVICE(CLK_CTL),
MSM_DEVICE(CLK_CTL_SH2),
MSM_DEVICE(AD5),
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 543fcb8b518..a5b7a236aa5 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -25,6 +25,7 @@
#include <video/omapdss.h>
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
+#include <plat/omap-pm.h>
static struct platform_device omap_display_device = {
.name = "omapdss",
@@ -42,20 +43,6 @@ static struct omap_device_pm_latency omap_dss_latency[] = {
},
};
-/* oh_core is used for getting opt-clocks */
-static struct omap_hwmod *oh_core;
-
-static bool opt_clock_available(const char *clk_role)
-{
- int i;
-
- for (i = 0; i < oh_core->opt_clks_cnt; i++) {
- if (!strcmp(oh_core->opt_clks[i].role, clk_role))
- return true;
- }
- return false;
-}
-
struct omap_dss_hwmod_data {
const char *oh_name;
const char *dev_name;
@@ -109,16 +96,9 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
oh_count = ARRAY_SIZE(omap4_dss_hwmod_data);
}
- /* opt_clks are always associated with dss hwmod */
- oh_core = omap_hwmod_lookup("dss_core");
- if (!oh_core) {
- pr_err("Could not look up dss_core.\n");
- return -ENODEV;
- }
-
pdata.board_data = board_data;
- pdata.board_data->get_last_off_on_transaction_id = NULL;
- pdata.opt_clock_available = opt_clock_available;
+ pdata.board_data->get_context_loss_count =
+ omap_pm_get_dev_context_loss_count;
for (i = 0; i < oh_count; i++) {
oh = omap_hwmod_lookup(curr_dss_hwmod[i].oh_name);
diff --git a/arch/arm/mach-shmobile/clock-sh7367.c b/arch/arm/mach-shmobile/clock-sh7367.c
index 6b186aefcbd..5218c34a9cc 100644
--- a/arch/arm/mach-shmobile/clock-sh7367.c
+++ b/arch/arm/mach-shmobile/clock-sh7367.c
@@ -259,9 +259,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[CMMSTP003] = MSTP(&r_clk, CMMSTPCR0, 3, 0), /* KEYSC */
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("r_clk", &r_clk),
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 91f5779abdd..6b1619a65db 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -561,10 +561,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
-#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("dv_clki_div2_clk", &sh7372_dv_clki_div2_clk),
diff --git a/arch/arm/mach-shmobile/clock-sh7377.c b/arch/arm/mach-shmobile/clock-sh7377.c
index 95942466e63..8cee7b151ae 100644
--- a/arch/arm/mach-shmobile/clock-sh7377.c
+++ b/arch/arm/mach-shmobile/clock-sh7377.c
@@ -267,9 +267,6 @@ static struct clk mstp_clks[] = {
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("r_clk", &r_clk),
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index bcacb1e8cf8..6db2ccabc2b 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -306,10 +306,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
-#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("r_clk", &r_clk),
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index e9d689b7c83..197e96f7040 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -10,6 +10,7 @@ config AVR32
select GENERIC_IRQ_PROBE
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_SHOW
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular
diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c
index 85026537361..466af40c582 100644
--- a/arch/cris/arch-v10/drivers/sync_serial.c
+++ b/arch/cris/arch-v10/drivers/sync_serial.c
@@ -158,7 +158,7 @@ static int sync_serial_open(struct inode *inode, struct file *file);
static int sync_serial_release(struct inode *inode, struct file *file);
static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
-static int sync_serial_ioctl(struct file *file,
+static long sync_serial_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
static ssize_t sync_serial_write(struct file *file, const char *buf,
size_t count, loff_t *ppos);
@@ -625,11 +625,11 @@ static int sync_serial_open(struct inode *inode, struct file *file)
*R_IRQ_MASK1_SET = 1 << port->data_avail_bit;
DEBUG(printk(KERN_DEBUG "sser%d rec started\n", dev));
}
- ret = 0;
+ err = 0;
out:
mutex_unlock(&sync_serial_mutex);
- return ret;
+ return err;
}
static int sync_serial_release(struct inode *inode, struct file *file)
diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c
index 907cfb5a873..ba0e5965d6e 100644
--- a/arch/cris/arch-v10/kernel/irq.c
+++ b/arch/cris/arch-v10/kernel/irq.c
@@ -20,6 +20,9 @@
#define crisv10_mask_irq(irq_nr) (*R_VECT_MASK_CLR = 1 << (irq_nr));
#define crisv10_unmask_irq(irq_nr) (*R_VECT_MASK_SET = 1 << (irq_nr));
+extern void kgdb_init(void);
+extern void breakpoint(void);
+
/* don't use set_int_vector, it bypasses the linux interrupt handlers. it is
* global just so that the kernel gdb can use it.
*/
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 29b74a10583..332f19c5455 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -11,8 +11,6 @@
#ifdef __KERNEL__
-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-
#ifndef __ASSEMBLY__
#include <asm/types.h>
#include <asm/processor.h>
@@ -67,8 +65,10 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
-#define alloc_thread_info(tsk, node) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define alloc_thread_info_node(tsk, node) \
+ ((struct thread_info *) __get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#endif /* !__ASSEMBLY__ */
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index cb884e48942..bad27a6ff40 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -7,6 +7,7 @@ config FRV
select HAVE_PERF_EVENTS
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
config ZONE_DMA
bool
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 137b277f7e5..12485471495 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -27,6 +27,8 @@ config IA64
select GENERIC_PENDING_IRQ if SMP
select IRQ_PER_CPU
select GENERIC_IRQ_SHOW
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -89,6 +91,9 @@ config GENERIC_TIME_VSYSCALL
config HAVE_SETUP_PER_CPU_AREA
def_bool y
+config GENERIC_GPIO
+ def_bool y
+
config DMI
bool
default y
diff --git a/arch/ia64/include/asm/gpio.h b/arch/ia64/include/asm/gpio.h
new file mode 100644
index 00000000000..590a20debc4
--- /dev/null
+++ b/arch/ia64/include/asm/gpio.h
@@ -0,0 +1,55 @@
+/*
+ * Generic GPIO API implementation for IA-64.
+ *
+ * A straight copy of that for PowerPC, which was:
+ *
+ * Copyright (c) 2007-2008 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_IA64_GPIO_H
+#define _ASM_IA64_GPIO_H
+
+#include <linux/errno.h>
+#include <asm-generic/gpio.h>
+
+#ifdef CONFIG_GPIOLIB
+
+/*
+ * We don't (yet) implement inlined/rapid versions for on-chip gpios.
+ * Just call gpiolib.
+ */
+static inline int gpio_get_value(unsigned int gpio)
+{
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+ __gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+ return __gpio_cansleep(gpio);
+}
+
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return __gpio_to_irq(gpio);
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+#endif /* _ASM_IA64_GPIO_H */
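The new ia64 <asm/gpio.h> above only forwards the generic gpio_* calls to gpiolib. For context, a minimal consumer-side sketch of that generic API, assuming some gpiolib driver has registered the line; the GPIO number, label, and function name below are hypothetical and error handling is trimmed:

#include <linux/errno.h>
#include <linux/gpio.h>         /* pulls in asm/gpio.h, i.e. asm-generic/gpio.h */

#define DEMO_GPIO 42            /* hypothetical line number */

static int demo_gpio_setup(void)
{
        int err;

        err = gpio_request(DEMO_GPIO, "demo");          /* reserve the line */
        if (err)
                return err;

        err = gpio_direction_output(DEMO_GPIO, 0);      /* configure as output, driven low */
        if (err) {
                gpio_free(DEMO_GPIO);
                return err;
        }

        gpio_set_value(DEMO_GPIO, 1);   /* ends up in __gpio_set_value() via the wrapper above */
        return 0;
}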
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 6fc03aff046..c38d22e5e90 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -156,7 +156,7 @@ prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \
- unsigned long attr, unsigned long data_size, \
+ u32 attr, unsigned long data_size, \
void *data) \
{ \
struct ia64_fpreg fr[6]; \
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 284cd3771ea..9e8ee9d2b8c 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -6,6 +6,7 @@ config M68K
select GENERIC_ATOMIC64 if MMU
select HAVE_GENERIC_HARDIRQS if !MMU
select GENERIC_IRQ_SHOW if !MMU
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
config RWSEM_GENERIC_SPINLOCK
bool
diff --git a/arch/m68k/Kconfig.mmu b/arch/m68k/Kconfig.mmu
index 16539b1d5d3..13e20bbc407 100644
--- a/arch/m68k/Kconfig.mmu
+++ b/arch/m68k/Kconfig.mmu
@@ -372,12 +372,6 @@ config AMIGA_PCMCIA
Include support in the kernel for pcmcia on Amiga 1200 and Amiga
600. If you intend to use pcmcia cards say Y; otherwise say N.
-config STRAM_PROC
- bool "ST-RAM statistics in /proc"
- depends on ATARI
- help
- Say Y here to report ST-RAM usage statistics in /proc/stram.
-
config HEARTBEAT
bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40
default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c
index dd0447db1c9..99449fbf9a7 100644
--- a/arch/m68k/amiga/chipram.c
+++ b/arch/m68k/amiga/chipram.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/module.h>
+#include <asm/atomic.h>
#include <asm/page.h>
#include <asm/amigahw.h>
@@ -23,111 +24,100 @@ unsigned long amiga_chip_size;
EXPORT_SYMBOL(amiga_chip_size);
static struct resource chipram_res = {
- .name = "Chip RAM", .start = CHIP_PHYSADDR
+ .name = "Chip RAM", .start = CHIP_PHYSADDR
};
-static unsigned long chipavail;
+static atomic_t chipavail;
void __init amiga_chip_init(void)
{
- if (!AMIGAHW_PRESENT(CHIP_RAM))
- return;
+ if (!AMIGAHW_PRESENT(CHIP_RAM))
+ return;
- chipram_res.end = amiga_chip_size-1;
- request_resource(&iomem_resource, &chipram_res);
+ chipram_res.end = CHIP_PHYSADDR + amiga_chip_size - 1;
+ request_resource(&iomem_resource, &chipram_res);
- chipavail = amiga_chip_size;
+ atomic_set(&chipavail, amiga_chip_size);
}
void *amiga_chip_alloc(unsigned long size, const char *name)
{
- struct resource *res;
+ struct resource *res;
+ void *p;
- /* round up */
- size = PAGE_ALIGN(size);
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ if (!res)
+ return NULL;
-#ifdef DEBUG
- printk("amiga_chip_alloc: allocate %ld bytes\n", size);
-#endif
- res = kzalloc(sizeof(struct resource), GFP_KERNEL);
- if (!res)
- return NULL;
- res->name = name;
+ res->name = name;
+ p = amiga_chip_alloc_res(size, res);
+ if (!p) {
+ kfree(res);
+ return NULL;
+ }
- if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
- kfree(res);
- return NULL;
- }
- chipavail -= size;
-#ifdef DEBUG
- printk("amiga_chip_alloc: returning %lx\n", res->start);
-#endif
- return (void *)ZTWO_VADDR(res->start);
+ return p;
}
EXPORT_SYMBOL(amiga_chip_alloc);
- /*
- * Warning:
- * amiga_chip_alloc_res is meant only for drivers that need to allocate
- * Chip RAM before kmalloc() is functional. As a consequence, those
- * drivers must not free that Chip RAM afterwards.
- */
+ /*
+ * Warning:
+ * amiga_chip_alloc_res is meant only for drivers that need to
+ * allocate Chip RAM before kmalloc() is functional. As a consequence,
+ * those drivers must not free that Chip RAM afterwards.
+ */
-void * __init amiga_chip_alloc_res(unsigned long size, struct resource *res)
+void *amiga_chip_alloc_res(unsigned long size, struct resource *res)
{
- unsigned long start;
-
- /* round up */
- size = PAGE_ALIGN(size);
- /* dmesg into chipmem prefers memory at the safe end */
- start = CHIP_PHYSADDR + chipavail - size;
-
-#ifdef DEBUG
- printk("amiga_chip_alloc_res: allocate %ld bytes\n", size);
-#endif
- if (allocate_resource(&chipram_res, res, size, start, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
- printk("amiga_chip_alloc_res: first alloc failed!\n");
- if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0)
- return NULL;
- }
- chipavail -= size;
-#ifdef DEBUG
- printk("amiga_chip_alloc_res: returning %lx\n", res->start);
-#endif
- return (void *)ZTWO_VADDR(res->start);
+ int error;
+
+ /* round up */
+ size = PAGE_ALIGN(size);
+
+ pr_debug("amiga_chip_alloc_res: allocate %lu bytes\n", size);
+ error = allocate_resource(&chipram_res, res, size, 0, UINT_MAX,
+ PAGE_SIZE, NULL, NULL);
+ if (error < 0) {
+ pr_err("amiga_chip_alloc_res: allocate_resource() failed %d!\n",
+ error);
+ return NULL;
+ }
+
+ atomic_sub(size, &chipavail);
+ pr_debug("amiga_chip_alloc_res: returning %pR\n", res);
+ return (void *)ZTWO_VADDR(res->start);
}
void amiga_chip_free(void *ptr)
{
- unsigned long start = ZTWO_PADDR(ptr);
- struct resource **p, *res;
- unsigned long size;
-
- for (p = &chipram_res.child; (res = *p); p = &res->sibling) {
- if (res->start != start)
- continue;
- *p = res->sibling;
- size = res->end-start;
-#ifdef DEBUG
- printk("amiga_chip_free: free %ld bytes at %p\n", size, ptr);
-#endif
- chipavail += size;
+ unsigned long start = ZTWO_PADDR(ptr);
+ struct resource *res;
+ unsigned long size;
+
+ res = lookup_resource(&chipram_res, start);
+ if (!res) {
+ pr_err("amiga_chip_free: trying to free nonexistent region at "
+ "%p\n", ptr);
+ return;
+ }
+
+ size = resource_size(res);
+ pr_debug("amiga_chip_free: free %lu bytes at %p\n", size, ptr);
+ atomic_add(size, &chipavail);
+ release_resource(res);
kfree(res);
- return;
- }
- printk("amiga_chip_free: trying to free nonexistent region at %p\n", ptr);
}
EXPORT_SYMBOL(amiga_chip_free);
unsigned long amiga_chip_avail(void)
{
-#ifdef DEBUG
- printk("amiga_chip_avail : %ld bytes\n", chipavail);
-#endif
- return chipavail;
+ unsigned long n = atomic_read(&chipavail);
+
+ pr_debug("amiga_chip_avail : %lu bytes\n", n);
+ return n;
}
EXPORT_SYMBOL(amiga_chip_avail);
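With the switch to kzalloc'd resources and an atomic_t byte counter, the Chip RAM interface itself is unchanged for callers. A hedged sketch of typical driver usage follows; the driver name, buffer size, and the assumption that the prototypes come from <asm/amigahw.h> are illustrative only:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/amigahw.h>        /* amiga_chip_alloc(), amiga_chip_free(), amiga_chip_avail() */

static void *demo_chip_buf;

static int demo_attach(void)
{
        pr_info("Chip RAM free: %lu bytes\n", amiga_chip_avail());

        /* size is rounded up to PAGE_SIZE by amiga_chip_alloc_res() */
        demo_chip_buf = amiga_chip_alloc(8192, "demo sound buffer");
        if (!demo_chip_buf)
                return -ENOMEM;
        return 0;
}

static void demo_detach(void)
{
        /* lookup_resource() finds the region by its physical start address */
        amiga_chip_free(demo_chip_buf);
}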
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index 6ec3b7f3377..0810c8d56e5 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -1,5 +1,5 @@
/*
- * arch/m68k/atari/stram.c: Functions for ST-RAM allocations
+ * Functions for ST-RAM allocations
*
* Copyright 1994-97 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
*
@@ -30,91 +30,35 @@
#include <asm/atari_stram.h>
#include <asm/io.h>
-#undef DEBUG
-
-#ifdef DEBUG
-#define DPRINTK(fmt,args...) printk( fmt, ##args )
-#else
-#define DPRINTK(fmt,args...)
-#endif
-
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_STRAM_PROC)
-/* abbrev for the && above... */
-#define DO_PROC
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
/*
- * ++roman:
- *
- * New version of ST-Ram buffer allocation. Instead of using the
- * 1 MB - 4 KB that remain when the ST-Ram chunk starts at $1000
- * (1 MB granularity!), such buffers are reserved like this:
- *
- * - If the kernel resides in ST-Ram anyway, we can take the buffer
- * from behind the current kernel data space the normal way
- * (incrementing start_mem).
- *
- * - If the kernel is in TT-Ram, stram_init() initializes start and
- * end of the available region. Buffers are allocated from there
- * and mem_init() later marks the such used pages as reserved.
- * Since each TT-Ram chunk is at least 4 MB in size, I hope there
- * won't be an overrun of the ST-Ram region by normal kernel data
- * space.
- *
- * For that, ST-Ram may only be allocated while kernel initialization
- * is going on, or exactly: before mem_init() is called. There is also
- * no provision now for freeing ST-Ram buffers. It seems that isn't
- * really needed.
- *
+ * The ST-RAM allocator allocates memory from a pool of reserved ST-RAM of
+ * configurable size, set aside on ST-RAM init.
+ * As long as this pool is not exhausted, allocation of real ST-RAM can be
+ * guaranteed.
*/
-/* Start and end (virtual) of ST-RAM */
-static void *stram_start, *stram_end;
-
-/* set after memory_init() executed and allocations via start_mem aren't
- * possible anymore */
-static int mem_init_done;
-
/* set if kernel is in ST-RAM */
static int kernel_in_stram;
-typedef struct stram_block {
- struct stram_block *next;
- void *start;
- unsigned long size;
- unsigned flags;
- const char *owner;
-} BLOCK;
-
-/* values for flags field */
-#define BLOCK_FREE 0x01 /* free structure in the BLOCKs pool */
-#define BLOCK_KMALLOCED 0x02 /* structure allocated by kmalloc() */
-#define BLOCK_GFP 0x08 /* block allocated with __get_dma_pages() */
+static struct resource stram_pool = {
+ .name = "ST-RAM Pool"
+};
-/* list of allocated blocks */
-static BLOCK *alloc_list;
+static unsigned long pool_size = 1024*1024;
-/* We can't always use kmalloc() to allocate BLOCK structures, since
- * stram_alloc() can be called rather early. So we need some pool of
- * statically allocated structures. 20 of them is more than enough, so in most
- * cases we never should need to call kmalloc(). */
-#define N_STATIC_BLOCKS 20
-static BLOCK static_blocks[N_STATIC_BLOCKS];
-/***************************** Prototypes *****************************/
+static int __init atari_stram_setup(char *arg)
+{
+ if (!MACH_IS_ATARI)
+ return 0;
-static BLOCK *add_region( void *addr, unsigned long size );
-static BLOCK *find_region( void *addr );
-static int remove_region( BLOCK *block );
+ pool_size = memparse(arg, NULL);
+ return 0;
+}
-/************************* End of Prototypes **************************/
+early_param("stram_pool", atari_stram_setup);
-
-/* ------------------------------------------------------------------------ */
-/* Public Interface */
-/* ------------------------------------------------------------------------ */
/*
* This init function is called very early by atari/config.c
@@ -123,25 +67,23 @@ static int remove_region( BLOCK *block );
void __init atari_stram_init(void)
{
int i;
+ void *stram_start;
- /* initialize static blocks */
- for( i = 0; i < N_STATIC_BLOCKS; ++i )
- static_blocks[i].flags = BLOCK_FREE;
-
- /* determine whether kernel code resides in ST-RAM (then ST-RAM is the
- * first memory block at virtual 0x0) */
+ /*
+ * determine whether kernel code resides in ST-RAM
+ * (then ST-RAM is the first memory block at virtual 0x0)
+ */
stram_start = phys_to_virt(0);
kernel_in_stram = (stram_start == 0);
- for( i = 0; i < m68k_num_memory; ++i ) {
+ for (i = 0; i < m68k_num_memory; ++i) {
if (m68k_memory[i].addr == 0) {
- /* skip first 2kB or page (supervisor-only!) */
- stram_end = stram_start + m68k_memory[i].size;
return;
}
}
+
/* Should never come here! (There is always ST-Ram!) */
- panic( "atari_stram_init: no ST-RAM found!" );
+ panic("atari_stram_init: no ST-RAM found!");
}
@@ -151,226 +93,68 @@ void __init atari_stram_init(void)
*/
void __init atari_stram_reserve_pages(void *start_mem)
{
- /* always reserve first page of ST-RAM, the first 2 kB are
- * supervisor-only! */
+ /*
+ * always reserve first page of ST-RAM, the first 2 KiB are
+ * supervisor-only!
+ */
if (!kernel_in_stram)
reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT);
-}
+ stram_pool.start = (resource_size_t)alloc_bootmem_low_pages(pool_size);
+ stram_pool.end = stram_pool.start + pool_size - 1;
+ request_resource(&iomem_resource, &stram_pool);
-void atari_stram_mem_init_hook (void)
-{
- mem_init_done = 1;
+ pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n",
+ pool_size, &stram_pool);
}
-/*
- * This is main public interface: somehow allocate a ST-RAM block
- *
- * - If we're before mem_init(), we have to make a static allocation. The
- * region is taken in the kernel data area (if the kernel is in ST-RAM) or
- * from the start of ST-RAM (if the kernel is in TT-RAM) and added to the
- * rsvd_stram_* region. The ST-RAM is somewhere in the middle of kernel
- * address space in the latter case.
- *
- * - If mem_init() already has been called, try with __get_dma_pages().
- * This has the disadvantage that it's very hard to get more than 1 page,
- * and it is likely to fail :-(
- *
- */
-void *atari_stram_alloc(long size, const char *owner)
+void *atari_stram_alloc(unsigned long size, const char *owner)
{
- void *addr = NULL;
- BLOCK *block;
- int flags;
-
- DPRINTK("atari_stram_alloc(size=%08lx,owner=%s)\n", size, owner);
-
- if (!mem_init_done)
- return alloc_bootmem_low(size);
- else {
- /* After mem_init(): can only resort to __get_dma_pages() */
- addr = (void *)__get_dma_pages(GFP_KERNEL, get_order(size));
- flags = BLOCK_GFP;
- DPRINTK( "atari_stram_alloc: after mem_init, "
- "get_pages=%p\n", addr );
+ struct resource *res;
+ int error;
+
+ pr_debug("atari_stram_alloc: allocate %lu bytes\n", size);
+
+ /* round up */
+ size = PAGE_ALIGN(size);
+
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ if (!res)
+ return NULL;
+
+ res->name = owner;
+ error = allocate_resource(&stram_pool, res, size, 0, UINT_MAX,
+ PAGE_SIZE, NULL, NULL);
+ if (error < 0) {
+ pr_err("atari_stram_alloc: allocate_resource() failed %d!\n",
+ error);
+ kfree(res);
+ return NULL;
}
- if (addr) {
- if (!(block = add_region( addr, size ))) {
- /* out of memory for BLOCK structure :-( */
- DPRINTK( "atari_stram_alloc: out of mem for BLOCK -- "
- "freeing again\n" );
- free_pages((unsigned long)addr, get_order(size));
- return( NULL );
- }
- block->owner = owner;
- block->flags |= flags;
- }
- return( addr );
+ pr_debug("atari_stram_alloc: returning %pR\n", res);
+ return (void *)res->start;
}
EXPORT_SYMBOL(atari_stram_alloc);
-void atari_stram_free( void *addr )
+void atari_stram_free(void *addr)
{
- BLOCK *block;
-
- DPRINTK( "atari_stram_free(addr=%p)\n", addr );
+ unsigned long start = (unsigned long)addr;
+ struct resource *res;
+ unsigned long size;
- if (!(block = find_region( addr ))) {
- printk( KERN_ERR "Attempt to free non-allocated ST-RAM block at %p "
- "from %p\n", addr, __builtin_return_address(0) );
+ res = lookup_resource(&stram_pool, start);
+ if (!res) {
+ pr_err("atari_stram_free: trying to free nonexistent region "
+ "at %p\n", addr);
return;
}
- DPRINTK( "atari_stram_free: found block (%p): size=%08lx, owner=%s, "
- "flags=%02x\n", block, block->size, block->owner, block->flags );
-
- if (!(block->flags & BLOCK_GFP))
- goto fail;
- DPRINTK("atari_stram_free: is kmalloced, order_size=%d\n",
- get_order(block->size));
- free_pages((unsigned long)addr, get_order(block->size));
- remove_region( block );
- return;
-
- fail:
- printk( KERN_ERR "atari_stram_free: cannot free block at %p "
- "(called from %p)\n", addr, __builtin_return_address(0) );
+ size = resource_size(res);
+ pr_debug("atari_stram_free: free %lu bytes at %p\n", size, addr);
+ release_resource(res);
+ kfree(res);
}
EXPORT_SYMBOL(atari_stram_free);
-
-
-/* ------------------------------------------------------------------------ */
-/* Region Management */
-/* ------------------------------------------------------------------------ */
-
-
-/* insert a region into the alloced list (sorted) */
-static BLOCK *add_region( void *addr, unsigned long size )
-{
- BLOCK **p, *n = NULL;
- int i;
-
- for( i = 0; i < N_STATIC_BLOCKS; ++i ) {
- if (static_blocks[i].flags & BLOCK_FREE) {
- n = &static_blocks[i];
- n->flags = 0;
- break;
- }
- }
- if (!n && mem_init_done) {
- /* if statics block pool exhausted and we can call kmalloc() already
- * (after mem_init()), try that */
- n = kmalloc( sizeof(BLOCK), GFP_KERNEL );
- if (n)
- n->flags = BLOCK_KMALLOCED;
- }
- if (!n) {
- printk( KERN_ERR "Out of memory for ST-RAM descriptor blocks\n" );
- return( NULL );
- }
- n->start = addr;
- n->size = size;
-
- for( p = &alloc_list; *p; p = &((*p)->next) )
- if ((*p)->start > addr) break;
- n->next = *p;
- *p = n;
-
- return( n );
-}
-
-
-/* find a region (by start addr) in the alloced list */
-static BLOCK *find_region( void *addr )
-{
- BLOCK *p;
-
- for( p = alloc_list; p; p = p->next ) {
- if (p->start == addr)
- return( p );
- if (p->start > addr)
- break;
- }
- return( NULL );
-}
-
-
-/* remove a block from the alloced list */
-static int remove_region( BLOCK *block )
-{
- BLOCK **p;
-
- for( p = &alloc_list; *p; p = &((*p)->next) )
- if (*p == block) break;
- if (!*p)
- return( 0 );
-
- *p = block->next;
- if (block->flags & BLOCK_KMALLOCED)
- kfree( block );
- else
- block->flags |= BLOCK_FREE;
- return( 1 );
-}
-
-
-
-/* ------------------------------------------------------------------------ */
-/* /proc statistics file stuff */
-/* ------------------------------------------------------------------------ */
-
-#ifdef DO_PROC
-
-#define PRINT_PROC(fmt,args...) seq_printf( m, fmt, ##args )
-
-static int stram_proc_show(struct seq_file *m, void *v)
-{
- BLOCK *p;
-
- PRINT_PROC("Total ST-RAM: %8u kB\n",
- (stram_end - stram_start) >> 10);
- PRINT_PROC( "Allocated regions:\n" );
- for( p = alloc_list; p; p = p->next ) {
- PRINT_PROC("0x%08lx-0x%08lx: %s (",
- virt_to_phys(p->start),
- virt_to_phys(p->start+p->size-1),
- p->owner);
- if (p->flags & BLOCK_GFP)
- PRINT_PROC( "page-alloced)\n" );
- else
- PRINT_PROC( "??)\n" );
- }
-
- return 0;
-}
-
-static int stram_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, stram_proc_show, NULL);
-}
-
-static const struct file_operations stram_proc_fops = {
- .open = stram_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init proc_stram_init(void)
-{
- proc_create("stram", 0, NULL, &stram_proc_fops);
- return 0;
-}
-module_init(proc_stram_init);
-#endif
-
-
-/*
- * Local variables:
- * c-indent-level: 4
- * tab-width: 4
- * End:
- */
diff --git a/arch/m68k/include/asm/atari_stram.h b/arch/m68k/include/asm/atari_stram.h
index 7546d13963b..62e27598af9 100644
--- a/arch/m68k/include/asm/atari_stram.h
+++ b/arch/m68k/include/asm/atari_stram.h
@@ -6,12 +6,11 @@
*/
/* public interface */
-void *atari_stram_alloc(long size, const char *owner);
+void *atari_stram_alloc(unsigned long size, const char *owner);
void atari_stram_free(void *);
/* functions called internally by other parts of the kernel */
void atari_stram_init(void);
void atari_stram_reserve_pages(void *start_mem);
-void atari_stram_mem_init_hook (void);
#endif /*_M68K_ATARI_STRAM_H */
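The rewritten ST-RAM allocator hands out pages from a pool reserved at boot: the pool defaults to 1 MiB and, per the early_param() added above, can be resized on the kernel command line with stram_pool= (parsed by memparse(), so suffixes such as stram_pool=2M work). A minimal caller-side sketch against the updated prototype; the names and sizes here are invented:

#include <linux/errno.h>
#include <asm/atari_stram.h>

static void *demo_st_dma;

static int demo_init(void)
{
        /* size argument is now unsigned long; rounded up to PAGE_SIZE internally */
        demo_st_dma = atari_stram_alloc(64 * 1024, "demo-dma");
        if (!demo_st_dma)
                return -ENOMEM;         /* ST-RAM pool exhausted */
        return 0;
}

static void demo_exit(void)
{
        atari_stram_free(demo_st_dma);
}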
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index f51f709bbf3..0392b28656a 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -399,8 +399,8 @@ struct CODEC
#define CODEC_OVERFLOW_LEFT 2
u_char unused2, unused3, unused4, unused5;
u_char gpio_directions;
-#define GPIO_IN 0
-#define GPIO_OUT 1
+#define CODEC_GPIO_IN 0
+#define CODEC_GPIO_OUT 1
u_char unused6;
u_char gpio_data;
};
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 334d8364037..c3b45061dd0 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -216,7 +216,9 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
void __init setup_arch(char **cmdline_p)
{
+#ifndef CONFIG_SUN3
int i;
+#endif
/* The bootinfo is located right after the kernel bss */
m68k_parse_bootinfo((const struct bi_record *)_end);
diff --git a/arch/m68k/math-emu/fp_log.c b/arch/m68k/math-emu/fp_log.c
index 367ecee2f98..3384a5244fb 100644
--- a/arch/m68k/math-emu/fp_log.c
+++ b/arch/m68k/math-emu/fp_log.c
@@ -105,9 +105,6 @@ fp_fetoxm1(struct fp_ext *dest, struct fp_ext *src)
fp_monadic_check(dest, src);
- if (IS_ZERO(dest))
- return dest;
-
return dest;
}
diff --git a/arch/m68k/math-emu/multi_arith.h b/arch/m68k/math-emu/multi_arith.h
index 4ad0ca918e2..4b5eb3d8563 100644
--- a/arch/m68k/math-emu/multi_arith.h
+++ b/arch/m68k/math-emu/multi_arith.h
@@ -19,246 +19,6 @@
#ifndef MULTI_ARITH_H
#define MULTI_ARITH_H
-#if 0 /* old code... */
-
-/* Unsigned only, because we don't need signs to multiply and divide. */
-typedef unsigned int int128[4];
-
-/* Word order */
-enum {
- MSW128,
- NMSW128,
- NLSW128,
- LSW128
-};
-
-/* big-endian */
-#define LO_WORD(ll) (((unsigned int *) &ll)[1])
-#define HI_WORD(ll) (((unsigned int *) &ll)[0])
-
-/* Convenience functions to stuff various integer values into int128s */
-
-static inline void zero128(int128 a)
-{
- a[LSW128] = a[NLSW128] = a[NMSW128] = a[MSW128] = 0;
-}
-
-/* Human-readable word order in the arguments */
-static inline void set128(unsigned int i3, unsigned int i2, unsigned int i1,
- unsigned int i0, int128 a)
-{
- a[LSW128] = i0;
- a[NLSW128] = i1;
- a[NMSW128] = i2;
- a[MSW128] = i3;
-}
-
-/* Convenience functions (for testing as well) */
-static inline void int64_to_128(unsigned long long src, int128 dest)
-{
- dest[LSW128] = (unsigned int) src;
- dest[NLSW128] = src >> 32;
- dest[NMSW128] = dest[MSW128] = 0;
-}
-
-static inline void int128_to_64(const int128 src, unsigned long long *dest)
-{
- *dest = src[LSW128] | (long long) src[NLSW128] << 32;
-}
-
-static inline void put_i128(const int128 a)
-{
- printk("%08x %08x %08x %08x\n", a[MSW128], a[NMSW128],
- a[NLSW128], a[LSW128]);
-}
-
-/* Internal shifters:
-
- Note that these are only good for 0 < count < 32.
- */
-
-static inline void _lsl128(unsigned int count, int128 a)
-{
- a[MSW128] = (a[MSW128] << count) | (a[NMSW128] >> (32 - count));
- a[NMSW128] = (a[NMSW128] << count) | (a[NLSW128] >> (32 - count));
- a[NLSW128] = (a[NLSW128] << count) | (a[LSW128] >> (32 - count));
- a[LSW128] <<= count;
-}
-
-static inline void _lsr128(unsigned int count, int128 a)
-{
- a[LSW128] = (a[LSW128] >> count) | (a[NLSW128] << (32 - count));
- a[NLSW128] = (a[NLSW128] >> count) | (a[NMSW128] << (32 - count));
- a[NMSW128] = (a[NMSW128] >> count) | (a[MSW128] << (32 - count));
- a[MSW128] >>= count;
-}
-
-/* Should be faster, one would hope */
-
-static inline void lslone128(int128 a)
-{
- asm volatile ("lsl.l #1,%0\n"
- "roxl.l #1,%1\n"
- "roxl.l #1,%2\n"
- "roxl.l #1,%3\n"
- :
- "=d" (a[LSW128]),
- "=d"(a[NLSW128]),
- "=d"(a[NMSW128]),
- "=d"(a[MSW128])
- :
- "0"(a[LSW128]),
- "1"(a[NLSW128]),
- "2"(a[NMSW128]),
- "3"(a[MSW128]));
-}
-
-static inline void lsrone128(int128 a)
-{
- asm volatile ("lsr.l #1,%0\n"
- "roxr.l #1,%1\n"
- "roxr.l #1,%2\n"
- "roxr.l #1,%3\n"
- :
- "=d" (a[MSW128]),
- "=d"(a[NMSW128]),
- "=d"(a[NLSW128]),
- "=d"(a[LSW128])
- :
- "0"(a[MSW128]),
- "1"(a[NMSW128]),
- "2"(a[NLSW128]),
- "3"(a[LSW128]));
-}
-
-/* Generalized 128-bit shifters:
-
- These bit-shift to a multiple of 32, then move whole longwords. */
-
-static inline void lsl128(unsigned int count, int128 a)
-{
- int wordcount, i;
-
- if (count % 32)
- _lsl128(count % 32, a);
-
- if (0 == (wordcount = count / 32))
- return;
-
- /* argh, gak, endian-sensitive */
- for (i = 0; i < 4 - wordcount; i++) {
- a[i] = a[i + wordcount];
- }
- for (i = 3; i >= 4 - wordcount; --i) {
- a[i] = 0;
- }
-}
-
-static inline void lsr128(unsigned int count, int128 a)
-{
- int wordcount, i;
-
- if (count % 32)
- _lsr128(count % 32, a);
-
- if (0 == (wordcount = count / 32))
- return;
-
- for (i = 3; i >= wordcount; --i) {
- a[i] = a[i - wordcount];
- }
- for (i = 0; i < wordcount; i++) {
- a[i] = 0;
- }
-}
-
-static inline int orl128(int a, int128 b)
-{
- b[LSW128] |= a;
-}
-
-static inline int btsthi128(const int128 a)
-{
- return a[MSW128] & 0x80000000;
-}
-
-/* test bits (numbered from 0 = LSB) up to and including "top" */
-static inline int bftestlo128(int top, const int128 a)
-{
- int r = 0;
-
- if (top > 31)
- r |= a[LSW128];
- if (top > 63)
- r |= a[NLSW128];
- if (top > 95)
- r |= a[NMSW128];
-
- r |= a[3 - (top / 32)] & ((1 << (top % 32 + 1)) - 1);
-
- return (r != 0);
-}
-
-/* Aargh. We need these because GCC is broken */
-/* FIXME: do them in assembly, for goodness' sake! */
-static inline void mask64(int pos, unsigned long long *mask)
-{
- *mask = 0;
-
- if (pos < 32) {
- LO_WORD(*mask) = (1 << pos) - 1;
- return;
- }
- LO_WORD(*mask) = -1;
- HI_WORD(*mask) = (1 << (pos - 32)) - 1;
-}
-
-static inline void bset64(int pos, unsigned long long *dest)
-{
- /* This conditional will be optimized away. Thanks, GCC! */
- if (pos < 32)
- asm volatile ("bset %1,%0":"=m"
- (LO_WORD(*dest)):"id"(pos));
- else
- asm volatile ("bset %1,%0":"=m"
- (HI_WORD(*dest)):"id"(pos - 32));
-}
-
-static inline int btst64(int pos, unsigned long long dest)
-{
- if (pos < 32)
- return (0 != (LO_WORD(dest) & (1 << pos)));
- else
- return (0 != (HI_WORD(dest) & (1 << (pos - 32))));
-}
-
-static inline void lsl64(int count, unsigned long long *dest)
-{
- if (count < 32) {
- HI_WORD(*dest) = (HI_WORD(*dest) << count)
- | (LO_WORD(*dest) >> count);
- LO_WORD(*dest) <<= count;
- return;
- }
- count -= 32;
- HI_WORD(*dest) = LO_WORD(*dest) << count;
- LO_WORD(*dest) = 0;
-}
-
-static inline void lsr64(int count, unsigned long long *dest)
-{
- if (count < 32) {
- LO_WORD(*dest) = (LO_WORD(*dest) >> count)
- | (HI_WORD(*dest) << (32 - count));
- HI_WORD(*dest) >>= count;
- return;
- }
- count -= 32;
- LO_WORD(*dest) = HI_WORD(*dest) >> count;
- HI_WORD(*dest) = 0;
-}
-#endif
-
static inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
{
reg->exp += cnt;
@@ -481,117 +241,6 @@ static inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src,
}
}
-#if 0
-static inline unsigned int fp_fls128(union fp_mant128 *src)
-{
- unsigned long data;
- unsigned int res, off;
-
- if ((data = src->m32[0]))
- off = 0;
- else if ((data = src->m32[1]))
- off = 32;
- else if ((data = src->m32[2]))
- off = 64;
- else if ((data = src->m32[3]))
- off = 96;
- else
- return 128;
-
- asm ("bfffo %1{#0,#32},%0" : "=d" (res) : "dm" (data));
- return res + off;
-}
-
-static inline void fp_shiftmant128(union fp_mant128 *src, int shift)
-{
- unsigned long sticky;
-
- switch (shift) {
- case 0:
- return;
- case 1:
- asm volatile ("lsl.l #1,%0"
- : "=d" (src->m32[3]) : "0" (src->m32[3]));
- asm volatile ("roxl.l #1,%0"
- : "=d" (src->m32[2]) : "0" (src->m32[2]));
- asm volatile ("roxl.l #1,%0"
- : "=d" (src->m32[1]) : "0" (src->m32[1]));
- asm volatile ("roxl.l #1,%0"
- : "=d" (src->m32[0]) : "0" (src->m32[0]));
- return;
- case 2 ... 31:
- src->m32[0] = (src->m32[0] << shift) | (src->m32[1] >> (32 - shift));
- src->m32[1] = (src->m32[1] << shift) | (src->m32[2] >> (32 - shift));
- src->m32[2] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
- src->m32[3] = (src->m32[3] << shift);
- return;
- case 32 ... 63:
- shift -= 32;
- src->m32[0] = (src->m32[1] << shift) | (src->m32[2] >> (32 - shift));
- src->m32[1] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
- src->m32[2] = (src->m32[3] << shift);
- src->m32[3] = 0;
- return;
- case 64 ... 95:
- shift -= 64;
- src->m32[0] = (src->m32[2] << shift) | (src->m32[3] >> (32 - shift));
- src->m32[1] = (src->m32[3] << shift);
- src->m32[2] = src->m32[3] = 0;
- return;
- case 96 ... 127:
- shift -= 96;
- src->m32[0] = (src->m32[3] << shift);
- src->m32[1] = src->m32[2] = src->m32[3] = 0;
- return;
- case -31 ... -1:
- shift = -shift;
- sticky = 0;
- if (src->m32[3] << (32 - shift))
- sticky = 1;
- src->m32[3] = (src->m32[3] >> shift) | (src->m32[2] << (32 - shift)) | sticky;
- src->m32[2] = (src->m32[2] >> shift) | (src->m32[1] << (32 - shift));
- src->m32[1] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift));
- src->m32[0] = (src->m32[0] >> shift);
- return;
- case -63 ... -32:
- shift = -shift - 32;
- sticky = 0;
- if ((src->m32[2] << (32 - shift)) || src->m32[3])
- sticky = 1;
- src->m32[3] = (src->m32[2] >> shift) | (src->m32[1] << (32 - shift)) | sticky;
- src->m32[2] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift));
- src->m32[1] = (src->m32[0] >> shift);
- src->m32[0] = 0;
- return;
- case -95 ... -64:
- shift = -shift - 64;
- sticky = 0;
- if ((src->m32[1] << (32 - shift)) || src->m32[2] || src->m32[3])
- sticky = 1;
- src->m32[3] = (src->m32[1] >> shift) | (src->m32[0] << (32 - shift)) | sticky;
- src->m32[2] = (src->m32[0] >> shift);
- src->m32[1] = src->m32[0] = 0;
- return;
- case -127 ... -96:
- shift = -shift - 96;
- sticky = 0;
- if ((src->m32[0] << (32 - shift)) || src->m32[1] || src->m32[2] || src->m32[3])
- sticky = 1;
- src->m32[3] = (src->m32[0] >> shift) | sticky;
- src->m32[2] = src->m32[1] = src->m32[0] = 0;
- return;
- }
-
- if (shift < 0 && (src->m32[0] || src->m32[1] || src->m32[2] || src->m32[3]))
- src->m32[3] = 1;
- else
- src->m32[3] = 0;
- src->m32[2] = 0;
- src->m32[1] = 0;
- src->m32[0] = 0;
-}
-#endif
-
static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
int shift)
{
@@ -637,183 +286,4 @@ static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
}
}
-#if 0 /* old code... */
-static inline int fls(unsigned int a)
-{
- int r;
-
- asm volatile ("bfffo %1{#0,#32},%0"
- : "=d" (r) : "md" (a));
- return r;
-}
-
-/* fls = "find last set" (cf. ffs(3)) */
-static inline int fls128(const int128 a)
-{
- if (a[MSW128])
- return fls(a[MSW128]);
- if (a[NMSW128])
- return fls(a[NMSW128]) + 32;
- /* XXX: it probably never gets beyond this point in actual
- use, but that's indicative of a more general problem in the
- algorithm (i.e. as per the actual 68881 implementation, we
- really only need at most 67 bits of precision [plus
- overflow]) so I'm not going to fix it. */
- if (a[NLSW128])
- return fls(a[NLSW128]) + 64;
- if (a[LSW128])
- return fls(a[LSW128]) + 96;
- else
- return -1;
-}
-
-static inline int zerop128(const int128 a)
-{
- return !(a[LSW128] | a[NLSW128] | a[NMSW128] | a[MSW128]);
-}
-
-static inline int nonzerop128(const int128 a)
-{
- return (a[LSW128] | a[NLSW128] | a[NMSW128] | a[MSW128]);
-}
-
-/* Addition and subtraction */
-/* Do these in "pure" assembly, because "extended" asm is unmanageable
- here */
-static inline void add128(const int128 a, int128 b)
-{
- /* rotating carry flags */
- unsigned int carry[2];
-
- carry[0] = a[LSW128] > (0xffffffff - b[LSW128]);
- b[LSW128] += a[LSW128];
-
- carry[1] = a[NLSW128] > (0xffffffff - b[NLSW128] - carry[0]);
- b[NLSW128] = a[NLSW128] + b[NLSW128] + carry[0];
-
- carry[0] = a[NMSW128] > (0xffffffff - b[NMSW128] - carry[1]);
- b[NMSW128] = a[NMSW128] + b[NMSW128] + carry[1];
-
- b[MSW128] = a[MSW128] + b[MSW128] + carry[0];
-}
-
-/* Note: assembler semantics: "b -= a" */
-static inline void sub128(const int128 a, int128 b)
-{
- /* rotating borrow flags */
- unsigned int borrow[2];
-
- borrow[0] = b[LSW128] < a[LSW128];
- b[LSW128] -= a[LSW128];
-
- borrow[1] = b[NLSW128] < a[NLSW128] + borrow[0];
- b[NLSW128] = b[NLSW128] - a[NLSW128] - borrow[0];
-
- borrow[0] = b[NMSW128] < a[NMSW128] + borrow[1];
- b[NMSW128] = b[NMSW128] - a[NMSW128] - borrow[1];
-
- b[MSW128] = b[MSW128] - a[MSW128] - borrow[0];
-}
-
-/* Poor man's 64-bit expanding multiply */
-static inline void mul64(unsigned long long a, unsigned long long b, int128 c)
-{
- unsigned long long acc;
- int128 acc128;
-
- zero128(acc128);
- zero128(c);
-
- /* first the low words */
- if (LO_WORD(a) && LO_WORD(b)) {
- acc = (long long) LO_WORD(a) * LO_WORD(b);
- c[NLSW128] = HI_WORD(acc);
- c[LSW128] = LO_WORD(acc);
- }
- /* Next the high words */
- if (HI_WORD(a) && HI_WORD(b)) {
- acc = (long long) HI_WORD(a) * HI_WORD(b);
- c[MSW128] = HI_WORD(acc);
- c[NMSW128] = LO_WORD(acc);
- }
- /* The middle words */
- if (LO_WORD(a) && HI_WORD(b)) {
- acc = (long long) LO_WORD(a) * HI_WORD(b);
- acc128[NMSW128] = HI_WORD(acc);
- acc128[NLSW128] = LO_WORD(acc);
- add128(acc128, c);
- }
- /* The first and last words */
- if (HI_WORD(a) && LO_WORD(b)) {
- acc = (long long) HI_WORD(a) * LO_WORD(b);
- acc128[NMSW128] = HI_WORD(acc);
- acc128[NLSW128] = LO_WORD(acc);
- add128(acc128, c);
- }
-}
-
-/* Note: unsigned */
-static inline int cmp128(int128 a, int128 b)
-{
- if (a[MSW128] < b[MSW128])
- return -1;
- if (a[MSW128] > b[MSW128])
- return 1;
- if (a[NMSW128] < b[NMSW128])
- return -1;
- if (a[NMSW128] > b[NMSW128])
- return 1;
- if (a[NLSW128] < b[NLSW128])
- return -1;
- if (a[NLSW128] > b[NLSW128])
- return 1;
-
- return (signed) a[LSW128] - b[LSW128];
-}
-
-inline void div128(int128 a, int128 b, int128 c)
-{
- int128 mask;
-
- /* Algorithm:
-
- Shift the divisor until it's at least as big as the
- dividend, keeping track of the position to which we've
- shifted it, i.e. the power of 2 which we've multiplied it
- by.
-
- Then, for this power of 2 (the mask), and every one smaller
- than it, subtract the mask from the dividend and add it to
- the quotient until the dividend is smaller than the raised
- divisor. At this point, divide the dividend and the mask
- by 2 (i.e. shift one place to the right). Lather, rinse,
- and repeat, until there are no more powers of 2 left. */
-
- /* FIXME: needless to say, there's room for improvement here too. */
-
- /* Shift up */
- /* XXX: since it just has to be "at least as big", we can
- probably eliminate this horribly wasteful loop. I will
- have to prove this first, though */
- set128(0, 0, 0, 1, mask);
- while (cmp128(b, a) < 0 && !btsthi128(b)) {
- lslone128(b);
- lslone128(mask);
- }
-
- /* Shift down */
- zero128(c);
- do {
- if (cmp128(a, b) >= 0) {
- sub128(b, a);
- add128(mask, c);
- }
- lsrone128(mask);
- lsrone128(b);
- } while (nonzerop128(mask));
-
- /* The remainder is in a... */
-}
-#endif
-
#endif /* MULTI_ARITH_H */
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index 9113c2f1760..bbe525434cc 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -83,11 +83,6 @@ void __init mem_init(void)
int initpages = 0;
int i;
-#ifdef CONFIG_ATARI
- if (MACH_IS_ATARI)
- atari_stram_mem_init_hook();
-#endif
-
/* this will put all memory onto the freelists */
totalram_pages = num_physpages = 0;
for_each_online_pgdat(pgdat) {
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 65adc86a230..e077b0bf56c 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -15,6 +15,7 @@ config PARISC
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index b1dc71f5534..4054b31e0fa 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -258,10 +258,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
-static __inline__ int
+static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
- int ret;
+ s64 ret;
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 67a33cc27ef..2388bdb3283 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -5,11 +5,14 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
+#include <asm/atomic.h>
#include <asm/errno.h>
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
+ unsigned long int flags;
+ u32 val;
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
@@ -18,21 +21,58 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
return -EFAULT;
pagefault_disable();
+ _atomic_spin_lock_irqsave(uaddr, flags);
+
switch (op) {
case FUTEX_OP_SET:
+ /* *(int *)UADDR2 = OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret)
+ ret = put_user(oparg, uaddr);
+ break;
case FUTEX_OP_ADD:
+ /* *(int *)UADDR2 += OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval + oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
case FUTEX_OP_OR:
+ /* *(int *)UADDR2 |= OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval | oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
case FUTEX_OP_ANDN:
+ /* *(int *)UADDR2 &= ~OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval & ~oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
case FUTEX_OP_XOR:
+ /* *(int *)UADDR2 ^= OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval ^ oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
default:
ret = -ENOSYS;
}
+ _atomic_spin_unlock_irqrestore(uaddr, flags);
+
pagefault_enable();
if (!ret) {
@@ -54,7 +94,9 @@ static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ int ret;
u32 val;
+ unsigned long flags;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble...
@@ -65,12 +107,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
- if (get_user(val, uaddr))
- return -EFAULT;
- if (val == oldval && put_user(newval, uaddr))
- return -EFAULT;
+ /* HPPA has no cmpxchg in hardware and therefore the
+ * best we can do here is use an array of locks. The
+ * lock selected is based on a hash of the userspace
+ * address. This should scale to a couple of CPUs.
+ */
+
+ _atomic_spin_lock_irqsave(uaddr, flags);
+
+ ret = get_user(val, uaddr);
+
+ if (!ret && val == oldval)
+ ret = put_user(newval, uaddr);
+
*uval = val;
- return 0;
+
+ _atomic_spin_unlock_irqrestore(uaddr, flags);
+
+ return ret;
}
#endif /*__KERNEL__*/
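PA-RISC has no hardware compare-and-swap, so the patch serializes futex_atomic_op_inuser() and futex_atomic_cmpxchg_inatomic() with the arch's hashed spinlocks (_atomic_spin_lock_irqsave(), keyed on the user address). A conceptual sketch of that hashed-lock pattern, using generic spinlocks rather than the real parisc macros; the lock count, hash, and all names are stand-ins:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define DEMO_NR_LOCKS 16        /* hypothetical; parisc sizes its array per SMP config */

static spinlock_t demo_locks[DEMO_NR_LOCKS];

static void demo_locks_init(void)
{
        int i;

        for (i = 0; i < DEMO_NR_LOCKS; i++)
                spin_lock_init(&demo_locks[i]);
}

/* Hash the word's address onto one lock; every RMW on that word must take it. */
static spinlock_t *demo_lock_for(u32 __user *uaddr)
{
        return &demo_locks[((unsigned long)uaddr >> 2) % DEMO_NR_LOCKS];
}

static int demo_cmpxchg_user(u32 __user *uaddr, u32 oldval, u32 newval, u32 *uval)
{
        spinlock_t *lock = demo_lock_for(uaddr);
        unsigned long flags;
        int ret;

        pagefault_disable();            /* faults return -EFAULT instead of sleeping */
        spin_lock_irqsave(lock, flags);
        ret = get_user(*uval, uaddr);
        if (!ret && *uval == oldval)
                ret = put_user(newval, uaddr);
        spin_unlock_irqrestore(lock, flags);
        pagefault_enable();
        return ret;
}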
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 3392de3e7be..d61de64f990 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -821,8 +821,9 @@
#define __NR_open_by_handle_at (__NR_Linux + 326)
#define __NR_syncfs (__NR_Linux + 327)
#define __NR_setns (__NR_Linux + 328)
+#define __NR_sendmmsg (__NR_Linux + 329)
-#define __NR_Linux_syscalls (__NR_setns + 1)
+#define __NR_Linux_syscalls (__NR_sendmmsg + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 34a4f5a2fff..e66366fd2ab 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -427,6 +427,7 @@
ENTRY_COMP(open_by_handle_at)
ENTRY_SAME(syncfs)
ENTRY_SAME(setns)
+ ENTRY_COMP(sendmmsg)
/* Nothing yet */
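The new table entry wires sys_sendmmsg (compat-wrapped) into the parisc syscall table. For reference, a hedged userspace sketch of what the syscall batches: several datagrams submitted in a single kernel entry. The socket is assumed to be an already-connected UDP socket and the payloads are invented:

#define _GNU_SOURCE
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>
#include <stdio.h>

/* sock is assumed to be a connected datagram socket. */
static int send_two(int sock)
{
        struct mmsghdr msgs[2];
        struct iovec iov[2];
        const char *payload[2] = { "first", "second" };
        int i, sent;

        memset(msgs, 0, sizeof(msgs));
        for (i = 0; i < 2; i++) {
                iov[i].iov_base = (void *)payload[i];
                iov[i].iov_len = strlen(payload[i]);
                msgs[i].msg_hdr.msg_iov = &iov[i];
                msgs[i].msg_hdr.msg_iovlen = 1;
        }

        sent = sendmmsg(sock, msgs, 2, 0);      /* one syscall, up to 2 messages */
        if (sent < 0)
                perror("sendmmsg");
        return sent;
}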
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 374c475e56a..6926b61acfe 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -136,6 +136,7 @@ config PPC
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_BPF_JIT if (PPC64 && NET)
select HAVE_ARCH_JUMP_LABEL
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
config EARLY_PRINTK
bool
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c03fef7a9c2..ed5cb5af528 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -81,6 +81,7 @@ config S390
select INIT_ALL_POSSIBLE
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
@@ -273,11 +274,11 @@ config MARCH_Z10
on older machines.
config MARCH_Z196
- bool "IBM zEnterprise 196"
+ bool "IBM zEnterprise 114 and 196"
help
- Select this to enable optimizations for IBM zEnterprise 196
- (2817 series). The kernel will be slightly faster but will not work
- on older machines.
+ Select this to enable optimizations for IBM zEnterprise 114 and 196
+ (2818 and 2817 series). The kernel will be slightly faster but will
+ not work on older machines.
endchoice
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 5e95d95450b..97cc4403fab 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -167,5 +167,6 @@ enum diag308_rc {
};
extern int diag308(unsigned long subcode, void *addr);
+extern void diag308_reset(void);
#endif /* _ASM_S390_IPL_H */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index f26280d9e88..e85c911aabf 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -18,6 +18,7 @@ void system_call(void);
void pgm_check_handler(void);
void mcck_int_handler(void);
void io_int_handler(void);
+void psw_restart_int_handler(void);
#ifdef CONFIG_32BIT
@@ -150,7 +151,10 @@ struct _lowcore {
*/
__u32 ipib; /* 0x0e00 */
__u32 ipib_checksum; /* 0x0e04 */
- __u8 pad_0x0e08[0x0f00-0x0e08]; /* 0x0e08 */
+
+ /* 64 bit save area */
+ __u64 save_area_64; /* 0x0e08 */
+ __u8 pad_0x0e10[0x0f00-0x0e10]; /* 0x0e10 */
/* Extended facility list */
__u64 stfle_fac_list[32]; /* 0x0f00 */
@@ -286,7 +290,10 @@ struct _lowcore {
*/
__u64 ipib; /* 0x0e00 */
__u32 ipib_checksum; /* 0x0e08 */
- __u8 pad_0x0e0c[0x0f00-0x0e0c]; /* 0x0e0c */
+
+ /* 64 bit save area */
+ __u64 save_area_64; /* 0x0e0c */
+ __u8 pad_0x0e14[0x0f00-0x0e14]; /* 0x0e14 */
/* Extended facility list */
__u64 stfle_fac_list[32]; /* 0x0f00 */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 55dfcc8bdc0..a4b6229e5d4 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -119,14 +119,12 @@ struct stack_frame {
* Do necessary setup to start up a new thread.
*/
#define start_thread(regs, new_psw, new_stackp) do { \
- set_fs(USER_DS); \
regs->psw.mask = psw_user_bits; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
} while (0)
#define start_thread31(regs, new_psw, new_stackp) do { \
- set_fs(USER_DS); \
regs->psw.mask = psw_user32_bits; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index d382629a017..6582f69f238 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -113,6 +113,7 @@ extern void pfault_fini(void);
extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);
+extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
#define finish_arch_switch(prev) do { \
set_fs(current->thread.mm_segment); \
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 05d8f38734e..532fd432215 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -27,12 +27,9 @@ int main(void)
BLANK();
DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
BLANK();
- DEFINE(__THREAD_per_cause,
- offsetof(struct task_struct, thread.per_event.cause));
- DEFINE(__THREAD_per_address,
- offsetof(struct task_struct, thread.per_event.address));
- DEFINE(__THREAD_per_paid,
- offsetof(struct task_struct, thread.per_event.paid));
+ DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
+ DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
+ DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
BLANK();
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -142,6 +139,7 @@ int main(void)
DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
+ DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64));
#ifdef CONFIG_32BIT
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
#else /* CONFIG_32BIT */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 209938c1dfc..255435663bf 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -76,6 +76,42 @@ s390_base_pgm_handler_fn:
.quad 0
.previous
+#
+# Calls diag 308 subcode 1 and continues execution
+#
+# The following conditions must be ensured before calling this function:
+# * Prefix register = 0
+# * Lowcore protection is disabled
+#
+ENTRY(diag308_reset)
+ larl %r4,.Lctlregs # Save control registers
+ stctg %c0,%c15,0(%r4)
+ larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0
+ lghi %r3,0
+ lg %r4,0(%r4) # Save PSW
+ sturg %r4,%r3 # Use sturg, because of large pages
+ lghi %r1,1
+ diag %r1,%r1,0x308
+.Lrestart_part2:
+ lhi %r0,0 # Load r0 with zero
+ lhi %r1,2 # Use mode 2 = ESAME (dump)
+ sigp %r1,%r0,0x12 # Switch to ESAME mode
+ sam64 # Switch to 64 bit addressing mode
+ larl %r4,.Lctlregs # Restore control registers
+ lctlg %c0,%c15,0(%r4)
+ br %r14
+.align 16
+.Lrestart_psw:
+ .long 0x00080000,0x80000000 + .Lrestart_part2
+
+ .section .bss
+.align 8
+.Lctlregs:
+ .rept 16
+ .quad 0
+ .endr
+ .previous
+
#else /* CONFIG_64BIT */
ENTRY(s390_base_mcck_handler)
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index eee999853a7..a9a285b8c4a 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -380,20 +380,13 @@ asmlinkage long sys32_sigreturn(void)
goto badframe;
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
goto badframe;
-
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
+ set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
if (restore_sigregs_gprs_high(regs, frame->gprs_high))
goto badframe;
-
return regs->gprs[2];
-
badframe:
force_sig(SIGSEGV, current);
return 0;
@@ -413,31 +406,22 @@ asmlinkage long sys32_rt_sigreturn(void)
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
-
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
+ set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_sigregs_gprs_high(regs, frame->gprs_high))
goto badframe;
-
err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
st.ss_sp = compat_ptr(ss_sp);
err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
if (err)
goto badframe;
-
set_fs (KERNEL_DS);
do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
set_fs (old_fs);
-
return regs->gprs[2];
-
badframe:
force_sig(SIGSEGV, current);
return 0;
@@ -605,10 +589,10 @@ give_sigsegv:
* OK, we're invoking a handler
*/
-int
-handle_signal32(unsigned long sig, struct k_sigaction *ka,
- siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+int handle_signal32(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
+ sigset_t blocked;
int ret;
/* Set up the stack frame */
@@ -616,15 +600,12 @@ handle_signal32(unsigned long sig, struct k_sigaction *ka,
ret = setup_rt_frame32(sig, ka, info, oldset, regs);
else
ret = setup_frame32(sig, ka, oldset, regs);
-
- if (ret == 0) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
- return ret;
+ if (ret)
+ return ret;
+ sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&blocked, sig);
+ set_current_blocked(&blocked);
+ return 0;
}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3eab7cfab07..02ec8fe7d03 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -849,6 +849,34 @@ restart_crash:
restart_go:
#endif
+#
+# PSW restart interrupt handler
+#
+ENTRY(psw_restart_int_handler)
+ st %r15,__LC_SAVE_AREA_64(%r0) # save r15
+ basr %r15,0
+0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack
+ l %r15,0(%r15)
+ ahi %r15,-SP_SIZE # make room for pt_regs
+ stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack
+ mvc SP_R15(4,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
+ mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+ basr %r14,0
+1: l %r14,.Ldo_restart-1b(%r14)
+ basr %r14,%r14
+
+ basr %r14,0 # load disabled wait PSW if
+2: lpsw restart_psw_crash-2b(%r14) # do_restart returns
+ .align 4
+.Ldo_restart:
+ .long do_restart
+.Lrestart_stack:
+ .long restart_stack
+ .align 8
+restart_psw_crash:
+ .long 0x000a0000,0x00000000 + restart_psw_crash
+
.section .kprobes.text, "ax"
#ifdef CONFIG_CHECK_STACK
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 7a0fd426ca9..5f729d627ce 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -865,6 +865,26 @@ restart_crash:
restart_go:
#endif
+#
+# PSW restart interrupt handler
+#
+ENTRY(psw_restart_int_handler)
+ stg %r15,__LC_SAVE_AREA_64(%r0) # save r15
+ larl %r15,restart_stack # load restart stack
+ lg %r15,0(%r15)
+ aghi %r15,-SP_SIZE # make room for pt_regs
+ stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack
+ mvc SP_R15(8,%r15),__LC_SAVE_AREA_64(%r0)# store saved %r15 to stack
+ mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+ brasl %r14,do_restart
+
+ larl %r14,restart_psw_crash # load disabled wait PSW if
+ lpswe 0(%r14) # do_restart returns
+ .align 8
+restart_psw_crash:
+ .quad 0x0002000080000000,0x0000000000000000 + restart_psw_crash
+
.section .kprobes.text, "ax"
#ifdef CONFIG_CHECK_STACK
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index a689070be28..04361d5a427 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -45,11 +45,13 @@
* - halt
* - power off
* - reipl
+ * - restart
*/
#define ON_PANIC_STR "on_panic"
#define ON_HALT_STR "on_halt"
#define ON_POFF_STR "on_poff"
#define ON_REIPL_STR "on_reboot"
+#define ON_RESTART_STR "on_restart"
struct shutdown_action;
struct shutdown_trigger {
@@ -1544,17 +1546,20 @@ static char vmcmd_on_reboot[128];
static char vmcmd_on_panic[128];
static char vmcmd_on_halt[128];
static char vmcmd_on_poff[128];
+static char vmcmd_on_restart[128];
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_restart, "%s\n", "%s\n", vmcmd_on_restart);
static struct attribute *vmcmd_attrs[] = {
&sys_vmcmd_on_reboot_attr.attr,
&sys_vmcmd_on_panic_attr.attr,
&sys_vmcmd_on_halt_attr.attr,
&sys_vmcmd_on_poff_attr.attr,
+ &sys_vmcmd_on_restart_attr.attr,
NULL,
};
@@ -1576,6 +1581,8 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
cmd = vmcmd_on_halt;
else if (strcmp(trigger->name, ON_POFF_STR) == 0)
cmd = vmcmd_on_poff;
+ else if (strcmp(trigger->name, ON_RESTART_STR) == 0)
+ cmd = vmcmd_on_restart;
else
return;
@@ -1707,6 +1714,34 @@ static void do_panic(void)
stop_run(&on_panic_trigger);
}
+/* on restart */
+
+static struct shutdown_trigger on_restart_trigger = {ON_RESTART_STR,
+ &reipl_action};
+
+static ssize_t on_restart_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return sprintf(page, "%s\n", on_restart_trigger.action->name);
+}
+
+static ssize_t on_restart_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return set_trigger(buf, &on_restart_trigger, len);
+}
+
+static struct kobj_attribute on_restart_attr =
+ __ATTR(on_restart, 0644, on_restart_show, on_restart_store);
+
+void do_restart(void)
+{
+ smp_send_stop();
+ on_restart_trigger.action->fn(&on_restart_trigger);
+ stop_run(&on_restart_trigger);
+}
+
/* on halt */
static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
@@ -1783,7 +1818,9 @@ static void __init shutdown_triggers_init(void)
if (sysfs_create_file(&shutdown_actions_kset->kobj,
&on_poff_attr.attr))
goto fail;
-
+ if (sysfs_create_file(&shutdown_actions_kset->kobj,
+ &on_restart_attr.attr))
+ goto fail;
return;
fail:
panic("shutdown_triggers_init failed\n");
@@ -1959,6 +1996,12 @@ static void do_reset_calls(void)
{
struct reset_call *reset;
+#ifdef CONFIG_64BIT
+ if (diag308_set_works) {
+ diag308_reset();
+ return;
+ }
+#endif
list_for_each_entry(reset, &rcall, list)
reset->fn();
}
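
With the trigger and attribute registered, on_restart appears next to on_panic, on_halt, on_poff and on_reboot in the shutdown_actions kset (on s390 that is exposed under /sys/firmware/shutdown_actions, as far as I recall). It defaults to the reipl action as wired up above, and writing another registered action name to the file changes what do_restart() will execute.
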
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 78eb7cfbd3d..e690975403f 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp 2000,2009
+ * Copyright IBM Corp 2000,2011
* Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
* Denis Joseph Barrow,
*/
@@ -8,6 +8,64 @@
#include <asm/asm-offsets.h>
#
+# store_status
+#
+# Prerequisites to run this function:
+# - Prefix register is set to zero
+# - Original prefix register is stored in "dump_prefix_page"
+# - Lowcore protection is off
+#
+ENTRY(store_status)
+ /* Save register one and load save area base */
+ stg %r1,__LC_SAVE_AREA_64(%r0)
+ lghi %r1,SAVE_AREA_BASE
+ /* General purpose registers */
+ stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ lg %r2,__LC_SAVE_AREA_64(%r0)
+ stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
+ /* Control registers */
+ stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Access registers */
+ stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Floating point registers */
+ std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Floating point control register */
+ stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* CPU timer */
+ stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Saved prefix register */
+ larl %r2,dump_prefix_page
+ mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
+ /* Clock comparator - seven bytes */
+ larl %r2,.Lclkcmp
+ stckc 0(%r2)
+ mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
+ /* Program status word */
+ epsw %r2,%r3
+ st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
+ st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
+ larl %r2,store_status
+ stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
+ br %r14
+.align 8
+.Lclkcmp: .quad 0x0000000000000000
+
+#
# do_reipl_asm
# Parameter: r2 = schid of reipl device
#
@@ -15,22 +73,7 @@
ENTRY(do_reipl_asm)
basr %r13,0
.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
-.Lpg1: # do store status of all registers
-
- stg %r1,.Lregsave-.Lpg0(%r13)
- lghi %r1,0x1000
- stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1)
- lg %r0,.Lregsave-.Lpg0(%r13)
- stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
- stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
- stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
- lg %r10,.Ldump_pfx-.Lpg0(%r13)
- mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
- stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
- stckc .Lclkcmp-.Lpg0(%r13)
- mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(7,%r1),.Lclkcmp-.Lpg0(%r13)
- stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
- stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
+.Lpg1: brasl %r14,store_status
lctlg %c6,%c6,.Lall-.Lpg0(%r13)
lgr %r1,%r2
@@ -67,10 +110,7 @@ ENTRY(do_reipl_asm)
st %r14,.Ldispsw+12-.Lpg0(%r13)
lpswe .Ldispsw-.Lpg0(%r13)
.align 8
-.Lclkcmp: .quad 0x0000000000000000
.Lall: .quad 0x00000000ff000000
-.Ldump_pfx: .quad dump_prefix_page
-.Lregsave: .quad 0x0000000000000000
.align 16
/*
* These addresses have to be 31 bit otherwise
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0c35dee10b0..7b371c37061 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -346,7 +346,7 @@ setup_lowcore(void)
lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
lc->restart_psw.addr =
- PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+ PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
if (user_mode != HOME_SPACE_MODE)
lc->restart_psw.mask |= PSW_ASC_HOME;
lc->external_new_psw.mask = psw_kernel_bits;
@@ -529,6 +529,27 @@ static void __init setup_memory_end(void)
memory_end = memory_size;
}
+void *restart_stack __attribute__((__section__(".data")));
+
+/*
+ * Setup new PSW and allocate stack for PSW restart interrupt
+ */
+static void __init setup_restart_psw(void)
+{
+ psw_t psw;
+
+ restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+ restart_stack += ASYNC_SIZE;
+
+ /*
+ * Setup restart PSW for absolute zero lowcore. This is necessary
+ * if PSW restart is done on an offline CPU that has lowcore zero
+ */
+ psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+ psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
+ copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
+}
+
static void __init
setup_memory(void)
{
@@ -731,6 +752,7 @@ static void __init setup_hwcaps(void)
strcpy(elf_platform, "z10");
break;
case 0x2817:
+ case 0x2818:
strcpy(elf_platform, "z196");
break;
}
@@ -792,6 +814,7 @@ setup_arch(char **cmdline_p)
setup_addressing_mode();
setup_memory();
setup_resources();
+ setup_restart_psw();
setup_lowcore();
cpu_init();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index abbb3c3c7aa..9a40e1cc5ec 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -57,17 +57,15 @@ typedef struct
*/
SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask)
{
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+ current->saved_sigmask = current->blocked;
+ mask &= _BLOCKABLE;
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
set_current_state(TASK_INTERRUPTIBLE);
schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
-
+ set_restore_sigmask();
return -ERESTARTNOHAND;
}
@@ -172,18 +170,11 @@ SYSCALL_DEFINE0(sigreturn)
goto badframe;
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
goto badframe;
-
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
+ set_current_blocked(&set);
if (restore_sigregs(regs, &frame->sregs))
goto badframe;
-
return regs->gprs[2];
-
badframe:
force_sig(SIGSEGV, current);
return 0;
@@ -199,21 +190,14 @@ SYSCALL_DEFINE0(rt_sigreturn)
goto badframe;
if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
-
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
+ set_current_blocked(&set);
if (restore_sigregs(regs, &frame->uc.uc_mcontext))
goto badframe;
-
if (do_sigaltstack(&frame->uc.uc_stack, NULL,
regs->gprs[15]) == -EFAULT)
goto badframe;
return regs->gprs[2];
-
badframe:
force_sig(SIGSEGV, current);
return 0;
@@ -385,14 +369,11 @@ give_sigsegv:
return -EFAULT;
}
-/*
- * OK, we're invoking a handler
- */
-
-static int
-handle_signal(unsigned long sig, struct k_sigaction *ka,
- siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+static int handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset,
+ struct pt_regs *regs)
{
+ sigset_t blocked;
int ret;
/* Set up the stack frame */
@@ -400,17 +381,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
ret = setup_rt_frame(sig, ka, info, oldset, regs);
else
ret = setup_frame(sig, ka, oldset, regs);
-
- if (ret == 0) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
-
- return ret;
+ if (ret)
+ return ret;
+ sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&blocked, sig);
+ set_current_blocked(&blocked);
+ return 0;
}
/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a6d85c0a7f2..6ab16ac64d2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -452,23 +452,27 @@ out:
*/
int __cpuinit start_secondary(void *cpuvoid)
{
- /* Setup the cpu */
cpu_init();
preempt_disable();
- /* Enable TOD clock interrupts on the secondary cpu. */
init_cpu_timer();
- /* Enable cpu timer interrupts on the secondary cpu. */
init_cpu_vtimer();
- /* Enable pfault pseudo page faults on this cpu. */
pfault_init();
- /* call cpu notifiers */
notify_cpu_starting(smp_processor_id());
- /* Mark this cpu as online */
ipi_call_lock();
set_cpu_online(smp_processor_id(), true);
ipi_call_unlock();
- /* Switch on interrupts */
+ __ctl_clear_bit(0, 28); /* Disable lowcore protection */
+ S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+ S390_lowcore.restart_psw.addr =
+ PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
+ __ctl_set_bit(0, 28); /* Enable lowcore protection */
+ /*
+ * Wait until the cpu which brought this one up marked it
+ * active before enabling interrupts.
+ */
+ while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+ cpu_relax();
local_irq_enable();
/* cpu_idle will call schedule for us */
cpu_idle();
@@ -507,7 +511,11 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
lowcore->async_stack = async_stack + ASYNC_SIZE;
lowcore->panic_stack = panic_stack + PAGE_SIZE;
-
+ lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+ lowcore->restart_psw.addr =
+ PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+ if (user_mode != HOME_SPACE_MODE)
+ lowcore->restart_psw.mask |= PSW_ASC_HOME;
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
unsigned long save_area;
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 51e5cd9b906..5dbbaa6e594 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -85,3 +85,19 @@ int memcpy_real(void *dest, void *src, size_t count)
arch_local_irq_restore(flags);
return rc;
}
+
+/*
+ * Copy memory to absolute zero
+ */
+void copy_to_absolute_zero(void *dest, void *src, size_t count)
+{
+ unsigned long cr0;
+
+ BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
+ preempt_disable();
+ __ctl_store(cr0, 0, 0);
+ __ctl_clear_bit(0, 28); /* disable lowcore protection */
+ memcpy_real(dest + store_prefix(), src, count);
+ __ctl_load(cr0, 0, 0);
+ preempt_enable();
+}
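
copy_to_absolute_zero() briefly drops lowcore protection via control register 0 bit 28, copies through memcpy_real() with store_prefix() added to the destination, and restores CR0; because real accesses to the current prefix area are swapped with absolute 0..8K by the prefixing hardware, the write lands in the absolute-zero lowcore rather than in this CPU's prefixed copy. The intended call pattern, mirroring setup_restart_psw() in the setup.c hunk above:

	psw_t psw;

	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	/* patch the restart new PSW of the absolute-zero lowcore */
	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
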
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 2adb23938a7..4d1f2bce87b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -528,6 +528,7 @@ static inline void page_table_free_pgste(unsigned long *table)
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
unsigned long vmaddr)
{
+ return NULL;
}
static inline void page_table_free_pgste(unsigned long *table)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 748ff192006..ff9177c8f64 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -11,6 +11,7 @@ config SUPERH
select HAVE_DMA_ATTRS
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
select PERF_USE_VMALLOC
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index e3d8170ad00..99385d0b3f3 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -173,6 +173,7 @@ core-$(CONFIG_HD6446X_SERIES) += arch/sh/cchips/hd6446x/
cpuincdir-$(CONFIG_CPU_SH2A) += cpu-sh2a
cpuincdir-$(CONFIG_CPU_SH2) += cpu-sh2
cpuincdir-$(CONFIG_CPU_SH3) += cpu-sh3
+cpuincdir-$(CONFIG_CPU_SH4A) += cpu-sh4a
cpuincdir-$(CONFIG_CPU_SH4) += cpu-sh4
cpuincdir-$(CONFIG_CPU_SH5) += cpu-sh5
cpuincdir-y += cpu-common # Must be last
diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c
index 8e2a27057bc..2823619c600 100644
--- a/arch/sh/boards/board-apsh4a3a.c
+++ b/arch/sh/boards/board-apsh4a3a.c
@@ -116,7 +116,7 @@ static int apsh4a3a_clk_init(void)
int ret;
clk = clk_get(NULL, "extal");
- if (!clk || IS_ERR(clk))
+ if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333000);
clk_put(clk);
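
This `!clk || IS_ERR(clk)` cleanup recurs across the board files in this series: clk_get() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so the NULL test is dead weight, and had it ever fired, the subsequent PTR_ERR(clk) would have been 0, i.e. the init routine would have claimed success without touching the clock. A minimal sketch of the intended idiom (rate value taken from the hunk above):

	struct clk *clk;
	int ret;

	clk = clk_get(NULL, "extal");
	if (IS_ERR(clk))		/* failure is always ERR_PTR-encoded */
		return PTR_ERR(clk);	/* propagates -ENOENT, -ENODEV, ... */
	ret = clk_set_rate(clk, 33333000);
	clk_put(clk);
	return ret;
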
diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c
index e2bd218a054..b4d6292a924 100644
--- a/arch/sh/boards/board-apsh4ad0a.c
+++ b/arch/sh/boards/board-apsh4ad0a.c
@@ -94,7 +94,7 @@ static int apsh4ad0a_clk_init(void)
int ret;
clk = clk_get(NULL, "extal");
- if (!clk || IS_ERR(clk))
+ if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333000);
clk_put(clk);
diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c
index ee65ff05c55..d879848f3cd 100644
--- a/arch/sh/boards/board-sh7785lcr.c
+++ b/arch/sh/boards/board-sh7785lcr.c
@@ -299,7 +299,7 @@ static int sh7785lcr_clk_init(void)
int ret;
clk = clk_get(NULL, "extal");
- if (!clk || IS_ERR(clk))
+ if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333333);
clk_put(clk);
diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c
index d81c609decc..24e3316c5c1 100644
--- a/arch/sh/boards/board-urquell.c
+++ b/arch/sh/boards/board-urquell.c
@@ -190,7 +190,7 @@ static int urquell_clk_init(void)
return -EINVAL;
clk = clk_get(NULL, "extal");
- if (!clk || IS_ERR(clk))
+ if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333333);
clk_put(clk);
diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c
index 87618c91d17..74b8db1b74a 100644
--- a/arch/sh/boards/mach-highlander/setup.c
+++ b/arch/sh/boards/mach-highlander/setup.c
@@ -335,8 +335,6 @@ static struct clk *r7780rp_clocks[] = {
&ivdr_clk,
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("ivdr_clk", &ivdr_clk),
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c
index 1521aa75ee3..486d1ac3694 100644
--- a/arch/sh/boards/mach-sdk7786/setup.c
+++ b/arch/sh/boards/mach-sdk7786/setup.c
@@ -194,7 +194,7 @@ static int sdk7786_clk_init(void)
return -EINVAL;
clk = clk_get(NULL, "extal");
- if (!clk || IS_ERR(clk))
+ if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333333);
clk_put(clk);
diff --git a/arch/sh/include/cpu-sh3/cpu/serial.h b/arch/sh/include/cpu-sh3/cpu/serial.h
new file mode 100644
index 00000000000..7766329bc10
--- /dev/null
+++ b/arch/sh/include/cpu-sh3/cpu/serial.h
@@ -0,0 +1,10 @@
+#ifndef __CPU_SH3_SERIAL_H
+#define __CPU_SH3_SERIAL_H
+
+#include <linux/serial_sci.h>
+
+extern struct plat_sci_port_ops sh770x_sci_port_ops;
+extern struct plat_sci_port_ops sh7710_sci_port_ops;
+extern struct plat_sci_port_ops sh7720_sci_port_ops;
+
+#endif /* __CPU_SH3_SERIAL_H */
diff --git a/arch/sh/include/cpu-sh4a/cpu/serial.h b/arch/sh/include/cpu-sh4a/cpu/serial.h
new file mode 100644
index 00000000000..ff1bc275d21
--- /dev/null
+++ b/arch/sh/include/cpu-sh4a/cpu/serial.h
@@ -0,0 +1,7 @@
+#ifndef __CPU_SH4A_SERIAL_H
+#define __CPU_SH4A_SERIAL_H
+
+/* arch/sh/kernel/cpu/sh4a/serial-sh7722.c */
+extern struct plat_sci_port_ops sh7722_sci_port_ops;
+
+#endif /* __CPU_SH4A_SERIAL_H */
diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c
index 8f63a264a84..f59b1f30d44 100644
--- a/arch/sh/kernel/cpu/clock-cpg.c
+++ b/arch/sh/kernel/cpu/clock-cpg.c
@@ -35,8 +35,6 @@ static struct clk *onchip_clocks[] = {
&cpu_clk,
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("master_clk", &master_clk),
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index ecab274141a..6f13f33a35f 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -7,15 +7,15 @@ obj-y := ex.o probe.o entry.o setup-sh3.o
obj-$(CONFIG_HIBERNATION) += swsusp.o
# CPU subtype setup
-obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh770x.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh770x.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o serial-sh7710.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o serial-sh7710.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o serial-sh7720.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o serial-sh7720.o
# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH3) := clock-sh3.o
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh770x.c b/arch/sh/kernel/cpu/sh3/serial-sh770x.c
new file mode 100644
index 00000000000..4f7242c676b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/serial-sh770x.c
@@ -0,0 +1,33 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <cpu/serial.h>
+
+#define SCPCR 0xA4000116
+#define SCPDR 0xA4000136
+
+static void sh770x_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ /* We need to set SCPCR to enable RTS/CTS */
+ data = __raw_readw(SCPCR);
+ /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
+ __raw_writew(data & 0x0fcf, SCPCR);
+
+ if (!(cflag & CRTSCTS)) {
+ /* We need to set SCPCR to enable RTS/CTS */
+ data = __raw_readw(SCPCR);
+ /* Clear out SCP7MD1,0, SCP4MD1,0,
+ Set SCP6MD1,0 = {01} (output) */
+ __raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
+
+ data = __raw_readb(SCPDR);
+ /* Set /RTS2 (bit6) = 0 */
+ __raw_writeb(data & 0xbf, SCPDR);
+ }
+}
+
+struct plat_sci_port_ops sh770x_sci_port_ops = {
+ .init_pins = sh770x_sci_init_pins,
+};
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7710.c b/arch/sh/kernel/cpu/sh3/serial-sh7710.c
new file mode 100644
index 00000000000..42190ef6aeb
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/serial-sh7710.c
@@ -0,0 +1,20 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <cpu/serial.h>
+
+#define PACR 0xa4050100
+#define PBCR 0xa4050102
+
+static void sh7710_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ if (port->mapbase == 0xA4400000) {
+ __raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
+ __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
+ } else if (port->mapbase == 0xA4410000)
+ __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
+}
+
+struct plat_sci_port_ops sh7710_sci_port_ops = {
+ .init_pins = sh7710_sci_init_pins,
+};
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7720.c b/arch/sh/kernel/cpu/sh3/serial-sh7720.c
new file mode 100644
index 00000000000..8832c526cdf
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/serial-sh7720.c
@@ -0,0 +1,37 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <cpu/serial.h>
+#include <asm/gpio.h>
+
+static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ if (cflag & CRTSCTS) {
+ /* enable RTS/CTS */
+ if (port->mapbase == 0xa4430000) { /* SCIF0 */
+ /* Clear PTCR bit 9-2; enable all scif pins but sck */
+ data = __raw_readw(PORT_PTCR);
+ __raw_writew((data & 0xfc03), PORT_PTCR);
+ } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
+ /* Clear PVCR bit 9-2 */
+ data = __raw_readw(PORT_PVCR);
+ __raw_writew((data & 0xfc03), PORT_PVCR);
+ }
+ } else {
+ if (port->mapbase == 0xa4430000) { /* SCIF0 */
+ /* Clear PTCR bit 5-2; enable only tx and rx */
+ data = __raw_readw(PORT_PTCR);
+ __raw_writew((data & 0xffc3), PORT_PTCR);
+ } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
+ /* Clear PVCR bit 5-2 */
+ data = __raw_readw(PORT_PVCR);
+ __raw_writew((data & 0xffc3), PORT_PVCR);
+ }
+ }
+}
+
+struct plat_sci_port_ops sh7720_sci_port_ops = {
+ .init_pins = sh7720_sci_init_pins,
+};
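
These new serial-sh*.c files hook SoC-specific pin setup into the sh-sci driver through a small ops table. Judging by the callbacks defined here, and assuming the plat_sci_port_ops layout in <linux/serial_sci.h> of this period, the structure is essentially a single hook that the driver invokes when termios settings change, so the CPU code can route the RTS/CTS pins:

	struct plat_sci_port_ops {
		void (*init_pins)(struct uart_port *port, unsigned int cflag);
	};

Each plat_sci_port then selects its table via .ops, as the setup-sh77xx.c hunks below do.
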
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index cd2e702feb7..2309618c015 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -15,6 +15,7 @@
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <asm/rtc.h>
+#include <cpu/serial.h>
enum {
UNUSED = 0,
@@ -75,6 +76,8 @@ static struct plat_sci_port scif0_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.irqs = { 56, 56, 56 },
+ .ops = &sh770x_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct platform_device scif0_device = {
@@ -92,6 +95,8 @@ static struct plat_sci_port scif1_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.irqs = { 52, 52, 52 },
+ .ops = &sh770x_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct platform_device scif1_device = {
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 4551ad647c2..3f3d5fe5892 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -19,6 +19,7 @@
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
+#include <cpu/serial.h>
enum {
UNUSED = 0,
@@ -108,11 +109,14 @@ static struct platform_device rtc_device = {
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xfffffe80,
+ .port_reg = 0xa4000136,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_TE | SCSCR_RE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCI,
.irqs = { 23, 23, 23, 0 },
+ .ops = &sh770x_sci_port_ops,
+ .regshift = 1,
};
static struct platform_device scif0_device = {
@@ -132,6 +136,8 @@ static struct plat_sci_port scif1_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 56, 56, 56, 56 },
+ .ops = &sh770x_sci_port_ops,
+ .regtype = SCIx_SH3_SCIF_REGTYPE,
};
static struct platform_device scif1_device = {
@@ -146,11 +152,14 @@ static struct platform_device scif1_device = {
defined(CONFIG_CPU_SUBTYPE_SH7709)
static struct plat_sci_port scif2_platform_data = {
.mapbase = 0xa4000140,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_TE | SCSCR_RE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_IRDA,
.irqs = { 52, 52, 52, 52 },
+ .ops = &sh770x_sci_port_ops,
+ .regshift = 1,
};
static struct platform_device scif2_device = {
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index 365b94a6fcb..94920345c14 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -20,6 +20,7 @@
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <asm/rtc.h>
+#include <cpu/serial.h>
static struct resource rtc_resources[] = {
[0] = {
@@ -55,6 +56,8 @@ static struct plat_sci_port scif0_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
+ .ops = &sh7720_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct platform_device scif0_device = {
@@ -72,6 +75,8 @@ static struct plat_sci_port scif1_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
+ .ops = &sh7720_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct platform_device scif1_device = {
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index 3f6f8e98635..f4e262adb39 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -147,8 +147,6 @@ static struct clk *sh4202_onchip_clocks[] = {
&sh4202_shoc_clk,
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("emi_clk", &sh4202_emi_clk),
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index e53b4b38bd1..98cc0c794c7 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -1,5 +1,5 @@
/*
- * SH7750/SH7751 Setup
+ * SH7091/SH7750/SH7750S/SH7750R/SH7751/SH7751R Setup
*
* Copyright (C) 2006 Paul Mundt
* Copyright (C) 2006 Jamie Lenehan
@@ -38,11 +38,13 @@ static struct platform_device rtc_device = {
static struct plat_sci_port sci_platform_data = {
.mapbase = 0xffe00000,
+ .port_reg = 0xffe0001C,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_TE | SCSCR_RE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCI,
.irqs = { 23, 23, 23, 0 },
+ .regshift = 2,
};
static struct platform_device sci_device = {
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 78bbf232e39..c0b4c774700 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -133,6 +133,7 @@ static struct plat_sci_port scif0_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 52, 53, 55, 54 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif0_device = {
@@ -150,6 +151,7 @@ static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.irqs = { 72, 73, 75, 74 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif1_device = {
@@ -167,6 +169,7 @@ static struct plat_sci_port scif2_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 76, 77, 79, 78 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif2_device = {
@@ -184,6 +187,7 @@ static struct plat_sci_port scif3_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCI,
.irqs = { 80, 81, 82, 0 },
+ .regshift = 2,
};
static struct platform_device scif3_device = {
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index cc122b1d303..c57fb287011 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o
obj-$(CONFIG_CPU_SUBTYPE_SH7786) += setup-sh7786.o intc-shx3.o
obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o serial-sh7722.o
obj-$(CONFIG_CPU_SUBTYPE_SH7723) += setup-sh7723.o
obj-$(CONFIG_CPU_SUBTYPE_SH7724) += setup-sh7724.o
obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
index 93c646072c1..70e45bdaadc 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -194,8 +194,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
@@ -233,32 +231,17 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
- {
- /* SCIF0 */
- .dev_id = "sh-sci.0",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP007],
- }, {
- /* SCIF1 */
- .dev_id = "sh-sci.1",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP006],
- }, {
- /* SCIF2 */
- .dev_id = "sh-sci.2",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP005],
- }, {
- /* SCIF3 */
- .dev_id = "sh-sci.3",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP004],
- },
+
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP007]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP006]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP005]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP004]),
+
CLKDEV_CON_ID("sio0", &mstp_clks[MSTP003]),
CLKDEV_CON_ID("siof0", &mstp_clks[MSTP002]),
CLKDEV_CON_ID("siof1", &mstp_clks[MSTP001]),
- CLKDEV_CON_ID("i2c0", &mstp_clks[MSTP109]),
- CLKDEV_CON_ID("i2c1", &mstp_clks[MSTP108]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP108]),
CLKDEV_CON_ID("tpu0", &mstp_clks[MSTP225]),
CLKDEV_CON_ID("irda0", &mstp_clks[MSTP224]),
CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
index 049dc0628cc..3c3165000c5 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
@@ -192,8 +192,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
@@ -231,25 +229,14 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
- {
- /* SCIF0 */
- .dev_id = "sh-sci.0",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP007],
- }, {
- /* SCIF1 */
- .dev_id = "sh-sci.1",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP006],
- }, {
- /* SCIF2 */
- .dev_id = "sh-sci.2",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP005],
- },
+
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP007]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP006]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP005]),
+
CLKDEV_CON_ID("msiof0", &mstp_clks[MSTP002]),
CLKDEV_CON_ID("sbr0", &mstp_clks[MSTP001]),
- CLKDEV_CON_ID("i2c0", &mstp_clks[MSTP109]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]),
CLKDEV_CON_ID("icb0", &mstp_clks[MSTP227]),
CLKDEV_CON_ID("meram0", &mstp_clks[MSTP226]),
CLKDEV_CON_ID("dacy1", &mstp_clks[MSTP224]),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 9d23a36f064..c9a48088ad4 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -175,8 +175,6 @@ static struct clk mstp_clks[HWBLK_NR] = {
SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_P], 0),
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
@@ -201,42 +199,20 @@ static struct clk_lookup lookups[] = {
/* MSTP clocks */
CLKDEV_CON_ID("uram0", &mstp_clks[HWBLK_URAM]),
CLKDEV_CON_ID("xymem0", &mstp_clks[HWBLK_XYMEM]),
- {
- /* TMU0 */
- .dev_id = "sh_tmu.0",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[HWBLK_TMU],
- }, {
- /* TMU1 */
- .dev_id = "sh_tmu.1",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[HWBLK_TMU],
- }, {
- /* TMU2 */
- .dev_id = "sh_tmu.2",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[HWBLK_TMU],
- },
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[HWBLK_TMU]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[HWBLK_TMU]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[HWBLK_TMU]),
+
CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
- {
- /* SCIF0 */
- .dev_id = "sh-sci.0",
- .con_id = "sci_fck",
- .clk = &mstp_clks[HWBLK_SCIF0],
- }, {
- /* SCIF1 */
- .dev_id = "sh-sci.1",
- .con_id = "sci_fck",
- .clk = &mstp_clks[HWBLK_SCIF1],
- }, {
- /* SCIF2 */
- .dev_id = "sh-sci.2",
- .con_id = "sci_fck",
- .clk = &mstp_clks[HWBLK_SCIF2],
- },
- CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC]),
+
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
+
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]),
CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI]),
CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index 55493cd5bd8..3cc3827380e 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -200,8 +200,6 @@ static struct clk mstp_clks[] = {
SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_B], 0),
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
@@ -305,7 +303,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("msiof0", &mstp_clks[HWBLK_MSIOF0]),
CLKDEV_CON_ID("msiof1", &mstp_clks[HWBLK_MSIOF1]),
CLKDEV_CON_ID("meram0", &mstp_clks[HWBLK_MERAM]),
- CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]),
CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
CLKDEV_CON_ID("adc0", &mstp_clks[HWBLK_ADC]),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index d08fa953c88..8668f557e0a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -252,8 +252,6 @@ static struct clk mstp_clks[HWBLK_NR] = {
SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_B], 0),
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
@@ -289,77 +287,31 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
- {
- /* TMU0 */
- .dev_id = "sh_tmu.0",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[HWBLK_TMU0],
- }, {
- /* TMU1 */
- .dev_id = "sh_tmu.1",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[HWBLK_TMU0],
- }, {
- /* TMU2 */
- .dev_id = "sh_tmu.2",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[HWBLK_TMU0],
- }, {
- /* TMU3 */
- .dev_id = "sh_tmu.3",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[HWBLK_TMU1],
- },
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[HWBLK_TMU1]),
+
CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
CLKDEV_CON_ID("dmac1", &mstp_clks[HWBLK_DMAC1]),
- {
- /* TMU4 */
- .dev_id = "sh_tmu.4",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[HWBLK_TMU1],
- }, {
- /* TMU5 */
- .dev_id = "sh_tmu.5",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[HWBLK_TMU1],
- }, {
- /* SCIF0 */
- .dev_id = "sh-sci.0",
- .con_id = "sci_fck",
- .clk = &mstp_clks[HWBLK_SCIF0],
- }, {
- /* SCIF1 */
- .dev_id = "sh-sci.1",
- .con_id = "sci_fck",
- .clk = &mstp_clks[HWBLK_SCIF1],
- }, {
- /* SCIF2 */
- .dev_id = "sh-sci.2",
- .con_id = "sci_fck",
- .clk = &mstp_clks[HWBLK_SCIF2],
- }, {
- /* SCIF3 */
- .dev_id = "sh-sci.3",
- .con_id = "sci_fck",
- .clk = &mstp_clks[HWBLK_SCIF3],
- }, {
- /* SCIF4 */
- .dev_id = "sh-sci.4",
- .con_id = "sci_fck",
- .clk = &mstp_clks[HWBLK_SCIF4],
- }, {
- /* SCIF5 */
- .dev_id = "sh-sci.5",
- .con_id = "sci_fck",
- .clk = &mstp_clks[HWBLK_SCIF5],
- },
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[HWBLK_TMU1]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[HWBLK_TMU1]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[HWBLK_SCIF3]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[HWBLK_SCIF4]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[HWBLK_SCIF5]),
+
CLKDEV_CON_ID("msiof0", &mstp_clks[HWBLK_MSIOF0]),
CLKDEV_CON_ID("msiof1", &mstp_clks[HWBLK_MSIOF1]),
CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
- CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC0]),
- CLKDEV_CON_ID("i2c1", &mstp_clks[HWBLK_IIC1]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC0]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[HWBLK_IIC1]),
CLKDEV_CON_ID("mmc0", &mstp_clks[HWBLK_MMC]),
CLKDEV_CON_ID("eth0", &mstp_clks[HWBLK_ETHER]),
CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index eedddad1383..3b097b09a3b 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -101,8 +101,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP220] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 20, 0),
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("extal", &extal_clk),
@@ -116,33 +114,13 @@ static struct clk_lookup lookups[] = {
/* MSTP32 clocks */
CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP004]),
CLKDEV_CON_ID("riic", &mstp_clks[MSTP000]),
- {
- /* TMU0 */
- .dev_id = "sh_tmu.0",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP113],
- }, {
- /* TMU1 */
- .dev_id = "sh_tmu.1",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP114],
- },
- {
- /* SCIF4 (But, ID is 2) */
- .dev_id = "sh-sci.2",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP112],
- }, {
- /* SCIF3 */
- .dev_id = "sh-sci.1",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP111],
- }, {
- /* SCIF2 */
- .dev_id = "sh-sci.0",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP110],
- },
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP113]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP114]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP112]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP111]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP110]),
+
CLKDEV_CON_ID("usb0", &mstp_clks[MSTP102]),
CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]),
};
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
index 599630fc4d3..2d4c7fd79c0 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
@@ -91,8 +91,6 @@ static struct clk *sh7763_onchip_clocks[] = {
&sh7763_shyway_clk,
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("shyway_clk", &sh7763_shyway_clk),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
index 8894926479a..3b53348fe2f 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
@@ -97,8 +97,6 @@ static struct clk *sh7780_onchip_clocks[] = {
&sh7780_shyway_clk,
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("shyway_clk", &sh7780_shyway_clk),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
index 2d960247f3e..e5b420cc126 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
@@ -116,8 +116,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP100] = SH_CLK_MSTP32(NULL, MSTPCR1, 0, 0),
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("extal", &extal_clk),
@@ -134,74 +132,27 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
/* MSTP32 clocks */
- {
- /* SCIF5 */
- .dev_id = "sh-sci.5",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP029],
- }, {
- /* SCIF4 */
- .dev_id = "sh-sci.4",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP028],
- }, {
- /* SCIF3 */
- .dev_id = "sh-sci.3",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP027],
- }, {
- /* SCIF2 */
- .dev_id = "sh-sci.2",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP026],
- }, {
- /* SCIF1 */
- .dev_id = "sh-sci.1",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP025],
- }, {
- /* SCIF0 */
- .dev_id = "sh-sci.0",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP024],
- },
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
+
CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
CLKDEV_CON_ID("mmcif_fck", &mstp_clks[MSTP013]),
CLKDEV_CON_ID("flctl_fck", &mstp_clks[MSTP012]),
- {
- /* TMU0 */
- .dev_id = "sh_tmu.0",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP008],
- }, {
- /* TMU1 */
- .dev_id = "sh_tmu.1",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP008],
- }, {
- /* TMU2 */
- .dev_id = "sh_tmu.2",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP008],
- }, {
- /* TMU3 */
- .dev_id = "sh_tmu.3",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP009],
- }, {
- /* TMU4 */
- .dev_id = "sh_tmu.4",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP009],
- }, {
- /* TMU5 */
- .dev_id = "sh_tmu.5",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP009],
- },
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[MSTP009]),
+
CLKDEV_CON_ID("siof_fck", &mstp_clks[MSTP003]),
CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
index 42e403be907..f6c0c3d5599 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
@@ -125,8 +125,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP102] = SH_CLK_MSTP32(NULL, MSTPCR1, 2, 0),
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("extal", &extal_clk),
@@ -141,37 +139,13 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
/* MSTP32 clocks */
- {
- /* SCIF5 */
- .dev_id = "sh-sci.5",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP029],
- }, {
- /* SCIF4 */
- .dev_id = "sh-sci.4",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP028],
- }, {
- /* SCIF3 */
- .dev_id = "sh-sci.3",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP027],
- }, {
- /* SCIF2 */
- .dev_id = "sh-sci.2",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP026],
- }, {
- /* SCIF1 */
- .dev_id = "sh-sci.1",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP025],
- }, {
- /* SCIF0 */
- .dev_id = "sh-sci.0",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP024],
- },
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
+
CLKDEV_CON_ID("ssi3_fck", &mstp_clks[MSTP023]),
CLKDEV_CON_ID("ssi2_fck", &mstp_clks[MSTP022]),
CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
@@ -180,67 +154,20 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
CLKDEV_CON_ID("i2c1_fck", &mstp_clks[MSTP015]),
CLKDEV_CON_ID("i2c0_fck", &mstp_clks[MSTP014]),
- {
- /* TMU0 */
- .dev_id = "sh_tmu.0",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP008],
- }, {
- /* TMU1 */
- .dev_id = "sh_tmu.1",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP008],
- }, {
- /* TMU2 */
- .dev_id = "sh_tmu.2",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP008],
- }, {
- /* TMU3 */
- .dev_id = "sh_tmu.3",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP009],
- }, {
- /* TMU4 */
- .dev_id = "sh_tmu.4",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP009],
- }, {
- /* TMU5 */
- .dev_id = "sh_tmu.5",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP009],
- }, {
- /* TMU6 */
- .dev_id = "sh_tmu.6",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP010],
- }, {
- /* TMU7 */
- .dev_id = "sh_tmu.7",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP010],
- }, {
- /* TMU8 */
- .dev_id = "sh_tmu.8",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP010],
- }, {
- /* TMU9 */
- .dev_id = "sh_tmu.9",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP011],
- }, {
- /* TMU10 */
- .dev_id = "sh_tmu.10",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP011],
- }, {
- /* TMU11 */
- .dev_id = "sh_tmu.11",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP011],
- },
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.6", &mstp_clks[MSTP010]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.7", &mstp_clks[MSTP010]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.8", &mstp_clks[MSTP010]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.9", &mstp_clks[MSTP011]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.10", &mstp_clks[MSTP011]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.11", &mstp_clks[MSTP011]),
+
CLKDEV_CON_ID("sdif1_fck", &mstp_clks[MSTP005]),
CLKDEV_CON_ID("sdif0_fck", &mstp_clks[MSTP004]),
CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
index 1afdb93b8cc..bf2d00b8b90 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
@@ -100,8 +100,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
};
-#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
-
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("extal", &extal_clk),
@@ -116,62 +114,23 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
/* MSTP32 clocks */
- {
- /* SCIF3 */
- .dev_id = "sh-sci.3",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP027],
- }, {
- /* SCIF2 */
- .dev_id = "sh-sci.2",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP026],
- }, {
- /* SCIF1 */
- .dev_id = "sh-sci.1",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP025],
- }, {
- /* SCIF0 */
- .dev_id = "sh-sci.0",
- .con_id = "sci_fck",
- .clk = &mstp_clks[MSTP024],
- },
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
+
CLKDEV_CON_ID("h8ex_fck", &mstp_clks[MSTP003]),
CLKDEV_CON_ID("csm_fck", &mstp_clks[MSTP002]),
CLKDEV_CON_ID("fe1_fck", &mstp_clks[MSTP001]),
CLKDEV_CON_ID("fe0_fck", &mstp_clks[MSTP000]),
- {
- /* TMU0 */
- .dev_id = "sh_tmu.0",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP008],
- }, {
- /* TMU1 */
- .dev_id = "sh_tmu.1",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP008],
- }, {
- /* TMU2 */
- .dev_id = "sh_tmu.2",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP008],
- }, {
- /* TMU3 */
- .dev_id = "sh_tmu.3",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP009],
- }, {
- /* TMU4 */
- .dev_id = "sh_tmu.4",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP009],
- }, {
- /* TMU5 */
- .dev_id = "sh_tmu.5",
- .con_id = "tmu_fck",
- .clk = &mstp_clks[MSTP009],
- },
+
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[MSTP009]),
+
CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
diff --git a/arch/sh/kernel/cpu/sh4a/serial-sh7722.c b/arch/sh/kernel/cpu/sh4a/serial-sh7722.c
new file mode 100644
index 00000000000..59bc3a72702
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/serial-sh7722.c
@@ -0,0 +1,23 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+
+#define PSCR 0xA405011E
+
+static void sh7722_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ if (port->mapbase == 0xffe00000) {
+ data = __raw_readw(PSCR);
+ data &= ~0x03cf;
+ if (!(cflag & CRTSCTS))
+ data |= 0x0340;
+
+ __raw_writew(data, PSCR);
+ }
+}
+
+struct plat_sci_port_ops sh7722_sci_port_ops = {
+ .init_pins = sh7722_sci_init_pins,
+};
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index 82616af64d6..87773869a2f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -20,6 +20,7 @@
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xffe00000,
+ .port_reg = 0xa405013e,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index 5813d802361..278a0e57215 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -22,6 +22,7 @@
#include <cpu/dma-register.h>
#include <cpu/sh7722.h>
+#include <cpu/serial.h>
static const struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
{
@@ -185,6 +186,8 @@ static struct plat_sci_port scif0_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
+ .ops = &sh7722_sci_port_ops,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct platform_device scif0_device = {
@@ -202,6 +205,8 @@ static struct plat_sci_port scif1_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
+ .ops = &sh7722_sci_port_ops,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct platform_device scif1_device = {
@@ -219,6 +224,8 @@ static struct plat_sci_port scif2_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
+ .ops = &sh7722_sci_port_ops,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct platform_device scif2_device = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 072382280f9..3c2810d8f72 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -23,11 +23,13 @@
/* Serial */
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xffe00000,
+ .port_reg = 0xa4050160,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct platform_device scif0_device = {
@@ -40,11 +42,13 @@ static struct platform_device scif0_device = {
static struct plat_sci_port scif1_platform_data = {
.mapbase = 0xffe10000,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct platform_device scif1_device = {
@@ -57,11 +61,13 @@ static struct platform_device scif1_device = {
static struct plat_sci_port scif2_platform_data = {
.mapbase = 0xffe20000,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct platform_device scif2_device = {
@@ -75,6 +81,7 @@ static struct platform_device scif2_device = {
static struct plat_sci_port scif3_platform_data = {
.mapbase = 0xa4e30000,
.flags = UPF_BOOT_AUTOCONF,
+ .port_reg = SCIx_NOT_SUPPORTED,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_3,
.type = PORT_SCIFA,
@@ -91,6 +98,7 @@ static struct platform_device scif3_device = {
static struct plat_sci_port scif4_platform_data = {
.mapbase = 0xa4e40000,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_3,
@@ -108,6 +116,7 @@ static struct platform_device scif4_device = {
static struct plat_sci_port scif5_platform_data = {
.mapbase = 0xa4e50000,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_3,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 134a397b191..a37dd72c367 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -296,11 +296,13 @@ static struct platform_device dma1_device = {
/* Serial */
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xffe00000,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct platform_device scif0_device = {
@@ -313,11 +315,13 @@ static struct platform_device scif0_device = {
static struct plat_sci_port scif1_platform_data = {
.mapbase = 0xffe10000,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct platform_device scif1_device = {
@@ -330,11 +334,13 @@ static struct platform_device scif1_device = {
static struct plat_sci_port scif2_platform_data = {
.mapbase = 0xffe20000,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
};
static struct platform_device scif2_device = {
@@ -347,6 +353,7 @@ static struct platform_device scif2_device = {
static struct plat_sci_port scif3_platform_data = {
.mapbase = 0xa4e30000,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_3,
@@ -364,6 +371,7 @@ static struct platform_device scif3_device = {
static struct plat_sci_port scif4_platform_data = {
.mapbase = 0xa4e40000,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_3,
@@ -381,6 +389,7 @@ static struct platform_device scif4_device = {
static struct plat_sci_port scif5_platform_data = {
.mapbase = 0xa4e50000,
+ .port_reg = SCIx_NOT_SUPPORTED,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_3,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 593eca6509b..00113515f23 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -23,6 +23,7 @@ static struct plat_sci_port scif0_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 40, 40, 40, 40 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif0_device = {
@@ -40,6 +41,7 @@ static struct plat_sci_port scif1_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 76, 76, 76, 76 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif1_device = {
@@ -57,6 +59,7 @@ static struct plat_sci_port scif2_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 104, 104, 104, 104 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif2_device = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 08add7fa684..3d4d2075c19 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -14,7 +14,6 @@
#include <linux/serial_sci.h>
#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
-
#include <cpu/dma-register.h>
static struct plat_sci_port scif0_platform_data = {
@@ -24,6 +23,7 @@ static struct plat_sci_port scif0_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 40, 40, 40, 40 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif0_device = {
@@ -41,6 +41,7 @@ static struct plat_sci_port scif1_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 76, 76, 76, 76 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif1_device = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 18d8fc136fb..b29e6340414 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -15,9 +15,7 @@
#include <linux/mm.h>
#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
-
#include <asm/mmzone.h>
-
#include <cpu/dma-register.h>
static struct plat_sci_port scif0_platform_data = {
@@ -27,6 +25,7 @@ static struct plat_sci_port scif0_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 40, 40, 40, 40 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif0_device = {
@@ -44,6 +43,7 @@ static struct plat_sci_port scif1_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 44, 44, 44, 44 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif1_device = {
@@ -61,6 +61,7 @@ static struct plat_sci_port scif2_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 60, 60, 60, 60 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif2_device = {
@@ -78,6 +79,7 @@ static struct plat_sci_port scif3_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 61, 61, 61, 61 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif3_device = {
@@ -95,6 +97,7 @@ static struct plat_sci_port scif4_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 62, 62, 62, 62 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif4_device = {
@@ -112,6 +115,7 @@ static struct plat_sci_port scif5_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 63, 63, 63, 63 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif5_device = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index beba32beb6d..dd5e709f982 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -1,7 +1,7 @@
/*
* SH7786 Setup
*
- * Copyright (C) 2009 - 2010 Renesas Solutions Corp.
+ * Copyright (C) 2009 - 2011 Renesas Solutions Corp.
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
* Paul Mundt <paul.mundt@renesas.com>
*
@@ -33,6 +33,7 @@ static struct plat_sci_port scif0_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 40, 41, 43, 42 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif0_device = {
@@ -53,6 +54,7 @@ static struct plat_sci_port scif1_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 44, 44, 44, 44 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif1_device = {
@@ -70,6 +72,7 @@ static struct plat_sci_port scif2_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 50, 50, 50, 50 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif2_device = {
@@ -87,6 +90,7 @@ static struct plat_sci_port scif3_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 51, 51, 51, 51 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif3_device = {
@@ -104,6 +108,7 @@ static struct plat_sci_port scif4_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 52, 52, 52, 52 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif4_device = {
@@ -121,6 +126,7 @@ static struct plat_sci_port scif5_platform_data = {
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 53, 53, 53, 53 },
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif5_device = {
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 84db0d6ccd0..32114e0941a 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -16,12 +16,13 @@
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
+#include <linux/cpuidle.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <linux/atomic.h>
#include <asm/smp.h>
-void (*pm_idle)(void) = NULL;
+static void (*pm_idle)(void);
static int hlt_counter;
@@ -100,7 +101,8 @@ void cpu_idle(void)
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
- pm_idle();
+ if (cpuidle_idle_call())
+ pm_idle();
/*
* Sanity check to ensure that pm_idle() returns
* with IRQs enabled
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 1074dddcb10..42c67beadca 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -54,6 +54,7 @@ config SPARC64
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select IRQ_PREFLOW_FASTEOI
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
config ARCH_DEFCONFIG
string
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 3c93f08ce18..2c2e38821f6 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -16,3 +16,8 @@ header-y += traps.h
header-y += uctx.h
header-y += utrap.h
header-y += watchdog.h
+
+generic-y += div64.h
+generic-y += local64.h
+generic-y += irq_regs.h
+generic-y += local.h
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index 325e295d60d..29011cc0e4b 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -26,61 +26,28 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
-#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
+extern int ffs(int x);
+extern unsigned long __ffs(unsigned long);
+
+#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ffs.h>
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
-#ifdef ULTRA_HAS_POPULATION_COUNT
-
-static inline unsigned int __arch_hweight64(unsigned long w)
-{
- unsigned int res;
-
- __asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
- return res;
-}
-
-static inline unsigned int __arch_hweight32(unsigned int w)
-{
- unsigned int res;
-
- __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff));
- return res;
-}
+extern unsigned long __arch_hweight64(__u64 w);
+extern unsigned int __arch_hweight32(unsigned int w);
+extern unsigned int __arch_hweight16(unsigned int w);
+extern unsigned int __arch_hweight8(unsigned int w);
-static inline unsigned int __arch_hweight16(unsigned int w)
-{
- unsigned int res;
-
- __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff));
- return res;
-}
-
-static inline unsigned int __arch_hweight8(unsigned int w)
-{
- unsigned int res;
-
- __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff));
- return res;
-}
-
-#else
-
-#include <asm-generic/bitops/arch_hweight.h>
-
-#endif
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
diff --git a/arch/sparc/include/asm/div64.h b/arch/sparc/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb2..00000000000
--- a/arch/sparc/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index 64f7a00b374..7df8b7f544d 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -59,15 +59,33 @@
#define R_SPARC_6 45
/* Bits present in AT_HWCAP, primarily for Sparc32. */
-
-#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
-#define HWCAP_SPARC_STBAR 2
-#define HWCAP_SPARC_SWAP 4
-#define HWCAP_SPARC_MULDIV 8
-#define HWCAP_SPARC_V9 16
-#define HWCAP_SPARC_ULTRA3 32
-#define HWCAP_SPARC_BLKINIT 64
-#define HWCAP_SPARC_N2 128
+#define HWCAP_SPARC_FLUSH 0x00000001
+#define HWCAP_SPARC_STBAR 0x00000002
+#define HWCAP_SPARC_SWAP 0x00000004
+#define HWCAP_SPARC_MULDIV 0x00000008
+#define HWCAP_SPARC_V9 0x00000010
+#define HWCAP_SPARC_ULTRA3 0x00000020
+#define HWCAP_SPARC_BLKINIT 0x00000040
+#define HWCAP_SPARC_N2 0x00000080
+
+/* Solaris compatible AT_HWCAP bits. */
+#define AV_SPARC_MUL32 0x00000100 /* 32x32 multiply is efficient */
+#define AV_SPARC_DIV32 0x00000200 /* 32x32 divide is efficient */
+#define AV_SPARC_FSMULD 0x00000400 /* 'fsmuld' is efficient */
+#define AV_SPARC_V8PLUS 0x00000800 /* v9 insn available to 32bit */
+#define AV_SPARC_POPC 0x00001000 /* 'popc' is efficient */
+#define AV_SPARC_VIS 0x00002000 /* VIS insns available */
+#define AV_SPARC_VIS2 0x00004000 /* VIS2 insns available */
+#define AV_SPARC_ASI_BLK_INIT 0x00008000 /* block init ASIs available */
+#define AV_SPARC_FMAF 0x00010000 /* fused multiply-add */
+#define AV_SPARC_VIS3 0x00020000 /* VIS3 insns available */
+#define AV_SPARC_HPC 0x00040000 /* HPC insns available */
+#define AV_SPARC_RANDOM 0x00080000 /* 'random' insn available */
+#define AV_SPARC_TRANS 0x00100000 /* transaction insns available */
+#define AV_SPARC_FJFMAU 0x00200000 /* unfused multiply-add */
+#define AV_SPARC_IMA 0x00400000 /* integer multiply-add */
+#define AV_SPARC_ASI_CACHE_SPARING \
+ 0x00800000 /* cache sparing ASIs available */
#define CORE_DUMP_USE_REGSET
@@ -162,33 +180,8 @@ typedef struct {
#define ELF_ET_DYN_BASE 0x0000010000000000UL
#define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
-
-/* This yields a mask that user programs can use to figure out what
- instruction set this cpu supports. */
-
-/* On Ultra, we support all of the v8 capabilities. */
-static inline unsigned int sparc64_elf_hwcap(void)
-{
- unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
- HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
- HWCAP_SPARC_V9);
-
- if (tlb_type == cheetah || tlb_type == cheetah_plus)
- cap |= HWCAP_SPARC_ULTRA3;
- else if (tlb_type == hypervisor) {
- if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
- cap |= HWCAP_SPARC_BLKINIT;
- if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
- cap |= HWCAP_SPARC_N2;
- }
-
- return cap;
-}
-
-#define ELF_HWCAP sparc64_elf_hwcap()
+extern unsigned long sparc64_elf_hwcap;
+#define ELF_HWCAP sparc64_elf_hwcap
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 7a5f80df15d..015a761eaa3 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2927,6 +2927,13 @@ extern unsigned long sun4v_ncs_request(unsigned long request,
#define HV_FAST_FIRE_GET_PERFREG 0x120
#define HV_FAST_FIRE_SET_PERFREG 0x121
+#define HV_FAST_REBOOT_DATA_SET 0x172
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_reboot_data_set(unsigned long ra,
+ unsigned long len);
+#endif
+
/* Function numbers for HV_CORE_TRAP. */
#define HV_CORE_SET_VER 0x00
#define HV_CORE_PUTCHAR 0x01
@@ -2940,11 +2947,17 @@ extern unsigned long sun4v_ncs_request(unsigned long request,
#define HV_GRP_CORE 0x0001
#define HV_GRP_INTR 0x0002
#define HV_GRP_SOFT_STATE 0x0003
+#define HV_GRP_TM 0x0080
#define HV_GRP_PCI 0x0100
#define HV_GRP_LDOM 0x0101
#define HV_GRP_SVC_CHAN 0x0102
#define HV_GRP_NCS 0x0103
#define HV_GRP_RNG 0x0104
+#define HV_GRP_PBOOT 0x0105
+#define HV_GRP_TPM 0x0107
+#define HV_GRP_SDIO 0x0108
+#define HV_GRP_SDIO_ERR 0x0109
+#define HV_GRP_REBOOT_DATA 0x0110
#define HV_GRP_NIAG_PERF 0x0200
#define HV_GRP_FIRE_PERF 0x0201
#define HV_GRP_N2_CPU 0x0202
diff --git a/arch/sparc/include/asm/irq_regs.h b/arch/sparc/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b7027..00000000000
--- a/arch/sparc/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/sparc/include/asm/local.h b/arch/sparc/include/asm/local.h
deleted file mode 100644
index bc80815a435..00000000000
--- a/arch/sparc/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _SPARC_LOCAL_H
-#define _SPARC_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif
diff --git a/arch/sparc/include/asm/local64.h b/arch/sparc/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc23..00000000000
--- a/arch/sparc/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 83c571d8c8a..1a8afd1ad04 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -133,29 +133,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
sub TSB, 0x8, TSB; \
TSB_STORE(TSB, TAG);
-#define KTSB_LOAD_QUAD(TSB, REG) \
- ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG;
-
-#define KTSB_STORE(ADDR, VAL) \
- stxa VAL, [ADDR] ASI_N;
-
-#define KTSB_LOCK_TAG(TSB, REG1, REG2) \
-99: lduwa [TSB] ASI_N, REG1; \
- sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\
- andcc REG1, REG2, %g0; \
- bne,pn %icc, 99b; \
- nop; \
- casa [TSB] ASI_N, REG1, REG2;\
- cmp REG1, REG2; \
- bne,pn %icc, 99b; \
- nop; \
-
-#define KTSB_WRITE(TSB, TTE, TAG) \
- add TSB, 0x8, TSB; \
- stxa TTE, [TSB] ASI_N; \
- sub TSB, 0x8, TSB; \
- stxa TAG, [TSB] ASI_N;
-
/* Do a kernel page table walk. Leaves physical PTE pointer in
* REG1. Jumps to FAIL_LABEL on early page table walk termination.
* VADDR will not be clobbered, but REG2 will.
@@ -239,6 +216,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
(KERNEL_TSB_SIZE_BYTES / 16)
#define KERNEL_TSB4M_NENTRIES 4096
+#define KTSB_PHYS_SHIFT 15
+
/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
* on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries
* and the found TTE will be left in REG1. REG3 and REG4 must
@@ -247,13 +226,22 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
* VADDR and TAG will be preserved and not clobbered by this macro.
*/
#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
- sethi %hi(swapper_tsb), REG1; \
+661: sethi %hi(swapper_tsb), REG1; \
or REG1, %lo(swapper_tsb), REG1; \
+ .section .swapper_tsb_phys_patch, "ax"; \
+ .word 661b; \
+ .previous; \
+661: nop; \
+ .section .tsb_ldquad_phys_patch, "ax"; \
+ .word 661b; \
+ sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+ sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+ .previous; \
srlx VADDR, PAGE_SHIFT, REG2; \
and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
sllx REG2, 4, REG2; \
add REG1, REG2, REG2; \
- KTSB_LOAD_QUAD(REG2, REG3); \
+ TSB_LOAD_QUAD(REG2, REG3); \
cmp REG3, TAG; \
be,a,pt %xcc, OK_LABEL; \
mov REG4, REG1;
@@ -263,12 +251,21 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
* we can make use of that for the index computation.
*/
#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
- sethi %hi(swapper_4m_tsb), REG1; \
+661: sethi %hi(swapper_4m_tsb), REG1; \
or REG1, %lo(swapper_4m_tsb), REG1; \
+ .section .swapper_4m_tsb_phys_patch, "ax"; \
+ .word 661b; \
+ .previous; \
+661: nop; \
+ .section .tsb_ldquad_phys_patch, "ax"; \
+ .word 661b; \
+ sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+ sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+ .previous; \
and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
sllx REG2, 4, REG2; \
add REG1, REG2, REG2; \
- KTSB_LOAD_QUAD(REG2, REG3); \
+ TSB_LOAD_QUAD(REG2, REG3); \
cmp REG3, TAG; \
be,a,pt %xcc, OK_LABEL; \
mov REG4, REG1;
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 17cf290dc2b..9810fd88105 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -396,6 +396,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
, cpu_data(0).clock_tick
#endif
);
+ cpucap_info(m);
#ifdef CONFIG_SMP
smp_bogo(m);
#endif
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index dd1342c0a3b..490e5418740 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -15,12 +15,15 @@
#include <linux/reboot.h>
#include <linux/cpu.h>
+#include <asm/hypervisor.h>
#include <asm/ldc.h>
#include <asm/vio.h>
#include <asm/mdesc.h>
#include <asm/head.h>
#include <asm/irq.h>
+#include "kernel.h"
+
#define DRV_MODULE_NAME "ds"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
@@ -828,18 +831,32 @@ void ldom_set_var(const char *var, const char *value)
}
}
+static char full_boot_str[256] __attribute__((aligned(32)));
+static int reboot_data_supported;
+
void ldom_reboot(const char *boot_command)
{
/* Don't bother with any of this if the boot_command
* is empty.
*/
if (boot_command && strlen(boot_command)) {
- char full_boot_str[256];
+ unsigned long len;
strcpy(full_boot_str, "boot ");
strcpy(full_boot_str + strlen("boot "), boot_command);
+ len = strlen(full_boot_str);
- ldom_set_var("reboot-command", full_boot_str);
+ if (reboot_data_supported) {
+ unsigned long ra = kimage_addr_to_ra(full_boot_str);
+ unsigned long hv_ret;
+
+ hv_ret = sun4v_reboot_data_set(ra, len);
+ if (hv_ret != HV_EOK)
+ pr_err("SUN4V: Unable to set reboot data "
+ "hv_ret=%lu\n", hv_ret);
+ } else {
+ ldom_set_var("reboot-command", full_boot_str);
+ }
}
sun4v_mach_sir();
}
@@ -1237,6 +1254,15 @@ static struct vio_driver ds_driver = {
static int __init ds_init(void)
{
+ unsigned long hv_ret, major, minor;
+
+ hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor);
+ if (hv_ret == HV_EOK) {
+ pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n",
+ major, minor);
+ reboot_data_supported = 1;
+ }
+
kthread_run(ds_thread, NULL, "kldomd");
return vio_register_driver(&ds_driver);
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index d1f1361c416..e27f8ea8656 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -42,6 +42,20 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
extern void fpload(unsigned long *fpregs, unsigned long *fsr);
#else /* CONFIG_SPARC32 */
+struct popc_3insn_patch_entry {
+ unsigned int addr;
+ unsigned int insns[3];
+};
+extern struct popc_3insn_patch_entry __popc_3insn_patch,
+ __popc_3insn_patch_end;
+
+struct popc_6insn_patch_entry {
+ unsigned int addr;
+ unsigned int insns[6];
+};
+extern struct popc_6insn_patch_entry __popc_6insn_patch,
+ __popc_6insn_patch_end;
+
extern void __init per_cpu_patch(void);
extern void __init sun4v_patch(void);
extern void __init boot_cpu_id_too_large(int cpu);
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index c752603a7c0..0eac1b2fc53 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -559,7 +559,7 @@ niagara2_patch:
nop
call niagara_patch_bzero
nop
- call niagara2_patch_pageops
+ call niagara_patch_pageops
nop
ba,a,pt %xcc, 80f
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index d306e648c33..c2d055d8ba9 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -28,11 +28,17 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_CORE, .flags = FLAG_PRE_API },
{ .group = HV_GRP_INTR, },
{ .group = HV_GRP_SOFT_STATE, },
+ { .group = HV_GRP_TM, },
{ .group = HV_GRP_PCI, .flags = FLAG_PRE_API },
{ .group = HV_GRP_LDOM, },
{ .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API },
{ .group = HV_GRP_NCS, .flags = FLAG_PRE_API },
{ .group = HV_GRP_RNG, },
+ { .group = HV_GRP_PBOOT, },
+ { .group = HV_GRP_TPM, },
+ { .group = HV_GRP_SDIO, },
+ { .group = HV_GRP_SDIO_ERR, },
+ { .group = HV_GRP_REBOOT_DATA, },
{ .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
{ .group = HV_GRP_FIRE_PERF, },
{ .group = HV_GRP_N2_CPU, },
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index 8a5f35ffb15..58d60de4d65 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -798,3 +798,10 @@ ENTRY(sun4v_niagara2_setperf)
retl
nop
ENDPROC(sun4v_niagara2_setperf)
+
+ENTRY(sun4v_reboot_data_set)
+ mov HV_FAST_REBOOT_DATA_SET, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_reboot_data_set)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 6ffccd6e015..d0479e2163f 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -65,9 +65,6 @@ static inline void dma_make_coherent(unsigned long pa, unsigned long len)
}
#endif
-static struct resource *_sparc_find_resource(struct resource *r,
- unsigned long);
-
static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
unsigned long size, char *name);
@@ -143,7 +140,11 @@ void iounmap(volatile void __iomem *virtual)
unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
struct resource *res;
- if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
+ /*
+ * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
+ * This probably warrants some sort of hashing.
+ */
+ if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) {
printk("free_io/iounmap: cannot free %lx\n", vaddr);
return;
}
@@ -319,7 +320,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
struct resource *res;
struct page *pgv;
- if ((res = _sparc_find_resource(&_sparc_dvma,
+ if ((res = lookup_resource(&_sparc_dvma,
(unsigned long)p)) == NULL) {
printk("sbus_free_consistent: cannot free %p\n", p);
return;
@@ -492,7 +493,7 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
{
struct resource *res;
- if ((res = _sparc_find_resource(&_sparc_dvma,
+ if ((res = lookup_resource(&_sparc_dvma,
(unsigned long)p)) == NULL) {
printk("pci_free_consistent: cannot free %p\n", p);
return;
@@ -715,25 +716,6 @@ static const struct file_operations sparc_io_proc_fops = {
};
#endif /* CONFIG_PROC_FS */
-/*
- * This is a version of find_resource and it belongs to kernel/resource.c.
- * Until we have agreement with Linus and Martin, it lingers here.
- *
- * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
- * This probably warrants some sort of hashing.
- */
-static struct resource *_sparc_find_resource(struct resource *root,
- unsigned long hit)
-{
- struct resource *tmp;
-
- for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
- if (tmp->start <= hit && tmp->end >= hit)
- return tmp;
- }
- return NULL;
-}
-
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index 6f6544cfa0e..fd6c36b1df7 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -4,12 +4,27 @@
#include <linux/interrupt.h>
#include <asm/traps.h>
+#include <asm/head.h>
+#include <asm/io.h>
/* cpu.c */
extern const char *sparc_pmu_type;
extern unsigned int fsr_storage;
extern int ncpus_probed;
+#ifdef CONFIG_SPARC64
+/* setup_64.c */
+struct seq_file;
+extern void cpucap_info(struct seq_file *);
+
+static inline unsigned long kimage_addr_to_ra(const char *p)
+{
+ unsigned long val = (unsigned long) p;
+
+ return kern_base + (val - KERNBASE);
+}
+#endif
+
#ifdef CONFIG_SPARC32
/* cpu.c */
extern void cpu_probe(void);
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 1d361477d7d..79f31036484 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -47,16 +47,16 @@ kvmap_itlb_tsb_miss:
kvmap_itlb_vmalloc_addr:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
- KTSB_LOCK_TAG(%g1, %g2, %g7)
+ TSB_LOCK_TAG(%g1, %g2, %g7)
/* Load and check PTE. */
ldxa [%g5] ASI_PHYS_USE_EC, %g5
mov 1, %g7
sllx %g7, TSB_TAG_INVALID_BIT, %g7
brgez,a,pn %g5, kvmap_itlb_longpath
- KTSB_STORE(%g1, %g7)
+ TSB_STORE(%g1, %g7)
- KTSB_WRITE(%g1, %g5, %g6)
+ TSB_WRITE(%g1, %g5, %g6)
/* fallthrough to TLB load */
@@ -102,9 +102,9 @@ kvmap_itlb_longpath:
kvmap_itlb_obp:
OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
- KTSB_LOCK_TAG(%g1, %g2, %g7)
+ TSB_LOCK_TAG(%g1, %g2, %g7)
- KTSB_WRITE(%g1, %g5, %g6)
+ TSB_WRITE(%g1, %g5, %g6)
ba,pt %xcc, kvmap_itlb_load
nop
@@ -112,17 +112,17 @@ kvmap_itlb_obp:
kvmap_dtlb_obp:
OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
- KTSB_LOCK_TAG(%g1, %g2, %g7)
+ TSB_LOCK_TAG(%g1, %g2, %g7)
- KTSB_WRITE(%g1, %g5, %g6)
+ TSB_WRITE(%g1, %g5, %g6)
ba,pt %xcc, kvmap_dtlb_load
nop
.align 32
kvmap_dtlb_tsb4m_load:
- KTSB_LOCK_TAG(%g1, %g2, %g7)
- KTSB_WRITE(%g1, %g5, %g6)
+ TSB_LOCK_TAG(%g1, %g2, %g7)
+ TSB_WRITE(%g1, %g5, %g6)
ba,pt %xcc, kvmap_dtlb_load
nop
@@ -222,16 +222,16 @@ kvmap_linear_patch:
kvmap_dtlb_vmalloc_addr:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
- KTSB_LOCK_TAG(%g1, %g2, %g7)
+ TSB_LOCK_TAG(%g1, %g2, %g7)
/* Load and check PTE. */
ldxa [%g5] ASI_PHYS_USE_EC, %g5
mov 1, %g7
sllx %g7, TSB_TAG_INVALID_BIT, %g7
brgez,a,pn %g5, kvmap_dtlb_longpath
- KTSB_STORE(%g1, %g7)
+ TSB_STORE(%g1, %g7)
- KTSB_WRITE(%g1, %g5, %g6)
+ TSB_WRITE(%g1, %g5, %g6)
/* fallthrough to TLB load */
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 42f28c7420e..acaebb63c4f 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -508,6 +508,8 @@ const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
}
EXPORT_SYMBOL(mdesc_node_name);
+static u64 max_cpus = 64;
+
static void __init report_platform_properties(void)
{
struct mdesc_handle *hp = mdesc_grab();
@@ -543,8 +545,10 @@ static void __init report_platform_properties(void)
if (v)
printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
v = mdesc_get_property(hp, pn, "max-cpus", NULL);
- if (v)
- printk("PLATFORM: max-cpus [%llu]\n", *v);
+ if (v) {
+ max_cpus = *v;
+ printk("PLATFORM: max-cpus [%llu]\n", max_cpus);
+ }
#ifdef CONFIG_SMP
{
@@ -715,7 +719,7 @@ static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
}
static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
- unsigned char def)
+ unsigned long def, unsigned long max)
{
u64 val;
@@ -726,6 +730,9 @@ static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
if (!val || val >= 64)
goto use_default;
+ if (val > max)
+ val = max;
+
*mask = ((1U << val) * 64U) - 1U;
return;
@@ -736,19 +743,28 @@ use_default:
static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
struct trap_per_cpu *tb)
{
+ static int printed;
const u64 *val;
val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
- get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);
+ get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2));
val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
- get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);
+ get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8);
val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
- get_one_mondo_bits(val, &tb->resum_qmask, 6);
+ get_one_mondo_bits(val, &tb->resum_qmask, 6, 7);
val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
- get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
+ get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2);
+ if (!printed++) {
+ pr_info("SUN4V: Mondo queue sizes "
+ "[cpu(%u) dev(%u) r(%u) nr(%u)]\n",
+ tb->cpu_mondo_qmask + 1,
+ tb->dev_mondo_qmask + 1,
+ tb->resum_qmask + 1,
+ tb->nonresum_qmask + 1);
+ }
}
static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index c4dd0999da8..3e9daea1653 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -29,6 +29,7 @@
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
+#include <linux/module.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -46,6 +47,8 @@
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
+#include <asm/elf.h>
+#include <asm/mdesc.h>
#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
@@ -269,6 +272,40 @@ void __init sun4v_patch(void)
sun4v_hvapi_init();
}
+static void __init popc_patch(void)
+{
+ struct popc_3insn_patch_entry *p3;
+ struct popc_6insn_patch_entry *p6;
+
+ p3 = &__popc_3insn_patch;
+ while (p3 < &__popc_3insn_patch_end) {
+ unsigned long i, addr = p3->addr;
+
+ for (i = 0; i < 3; i++) {
+ *(unsigned int *) (addr + (i * 4)) = p3->insns[i];
+ wmb();
+ __asm__ __volatile__("flush %0"
+ : : "r" (addr + (i * 4)));
+ }
+
+ p3++;
+ }
+
+ p6 = &__popc_6insn_patch;
+ while (p6 < &__popc_6insn_patch_end) {
+ unsigned long i, addr = p6->addr;
+
+ for (i = 0; i < 6; i++) {
+ *(unsigned int *) (addr + (i * 4)) = p6->insns[i];
+ wmb();
+ __asm__ __volatile__("flush %0"
+ : : "r" (addr + (i * 4)));
+ }
+
+ p6++;
+ }
+}
+
#ifdef CONFIG_SMP
void __init boot_cpu_id_too_large(int cpu)
{
@@ -278,6 +315,154 @@ void __init boot_cpu_id_too_large(int cpu)
}
#endif
+/* On Ultra, we support all of the v8 capabilities. */
+unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
+ HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
+ HWCAP_SPARC_V9);
+EXPORT_SYMBOL(sparc64_elf_hwcap);
+
+static const char *hwcaps[] = {
+ "flush", "stbar", "swap", "muldiv", "v9",
+ "ultra3", "blkinit", "n2",
+
+ /* These strings are as they appear in the machine description
+ * 'hwcap-list' property for cpu nodes.
+ */
+ "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
+ "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
+ "ima", "cspare",
+};
+
+void cpucap_info(struct seq_file *m)
+{
+ unsigned long caps = sparc64_elf_hwcap;
+ int i, printed = 0;
+
+ seq_puts(m, "cpucaps\t\t: ");
+ for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
+ unsigned long bit = 1UL << i;
+ if (caps & bit) {
+ seq_printf(m, "%s%s",
+ printed ? "," : "", hwcaps[i]);
+ printed++;
+ }
+ }
+ seq_putc(m, '\n');
+}
+
+static void __init report_hwcaps(unsigned long caps)
+{
+ int i, printed = 0;
+
+ printk(KERN_INFO "CPU CAPS: [");
+ for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
+ unsigned long bit = 1UL << i;
+ if (caps & bit) {
+ printk(KERN_CONT "%s%s",
+ printed ? "," : "", hwcaps[i]);
+ if (++printed == 8) {
+ printk(KERN_CONT "]\n");
+ printk(KERN_INFO "CPU CAPS: [");
+ printed = 0;
+ }
+ }
+ }
+ printk(KERN_CONT "]\n");
+}
+
+static unsigned long __init mdesc_cpu_hwcap_list(void)
+{
+ struct mdesc_handle *hp;
+ unsigned long caps = 0;
+ const char *prop;
+ int len;
+ u64 pn;
+
+ hp = mdesc_grab();
+ if (!hp)
+ return 0;
+
+ pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
+ if (pn == MDESC_NODE_NULL)
+ goto out;
+
+ prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
+ if (!prop)
+ goto out;
+
+ while (len) {
+ int i, plen;
+
+ for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
+ unsigned long bit = 1UL << i;
+
+ if (!strcmp(prop, hwcaps[i])) {
+ caps |= bit;
+ break;
+ }
+ }
+
+ plen = strlen(prop) + 1;
+ prop += plen;
+ len -= plen;
+ }
+
+out:
+ mdesc_release(hp);
+ return caps;
+}
+
+/* This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+static void __init init_sparc64_elf_hwcap(void)
+{
+ unsigned long cap = sparc64_elf_hwcap;
+ unsigned long mdesc_caps;
+
+ if (tlb_type == cheetah || tlb_type == cheetah_plus)
+ cap |= HWCAP_SPARC_ULTRA3;
+ else if (tlb_type == hypervisor) {
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+ cap |= HWCAP_SPARC_BLKINIT;
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+ cap |= HWCAP_SPARC_N2;
+ }
+
+ cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);
+
+ mdesc_caps = mdesc_cpu_hwcap_list();
+ if (!mdesc_caps) {
+ if (tlb_type == spitfire)
+ cap |= AV_SPARC_VIS;
+ if (tlb_type == cheetah || tlb_type == cheetah_plus)
+ cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
+ if (tlb_type == cheetah_plus)
+ cap |= AV_SPARC_POPC;
+ if (tlb_type == hypervisor) {
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
+ cap |= AV_SPARC_ASI_BLK_INIT;
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+ cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
+ AV_SPARC_ASI_BLK_INIT |
+ AV_SPARC_POPC);
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+ cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
+ AV_SPARC_FMAF);
+ }
+ }
+ sparc64_elf_hwcap = cap | mdesc_caps;
+
+ report_hwcaps(sparc64_elf_hwcap);
+
+ if (sparc64_elf_hwcap & AV_SPARC_POPC)
+ popc_patch();
+}
+
void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */
@@ -337,6 +522,7 @@ void __init setup_arch(char **cmdline_p)
init_cur_cpu_trap(current_thread_info());
paging_init();
+ init_sparc64_elf_hwcap();
}
extern int stop_a_enabled;
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c
index 372ad59c4cb..83b47ab02d9 100644
--- a/arch/sparc/kernel/sparc_ksyms_64.c
+++ b/arch/sparc/kernel/sparc_ksyms_64.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/cpudata.h>
@@ -38,5 +39,15 @@ EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf);
+/* from hweight.S */
+EXPORT_SYMBOL(__arch_hweight8);
+EXPORT_SYMBOL(__arch_hweight16);
+EXPORT_SYMBOL(__arch_hweight32);
+EXPORT_SYMBOL(__arch_hweight64);
+
+/* from ffs_ffz.S */
+EXPORT_SYMBOL(ffs);
+EXPORT_SYMBOL(__ffs);
+
/* Exporting a symbol from /init/main.c */
EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c
index 8cdbe5946b4..c59af546f52 100644
--- a/arch/sparc/kernel/sstate.c
+++ b/arch/sparc/kernel/sstate.c
@@ -14,14 +14,9 @@
#include <asm/head.h>
#include <asm/io.h>
-static int hv_supports_soft_state;
-
-static unsigned long kimage_addr_to_ra(const char *p)
-{
- unsigned long val = (unsigned long) p;
+#include "kernel.h"
- return kern_base + (val - KERNBASE);
-}
+static int hv_supports_soft_state;
static void do_set_sstate(unsigned long state, const char *msg)
{
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 35cff1673aa..76e4ac1a13e 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -22,6 +22,7 @@
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
+#include <linux/bitops.h>
#include <asm/fpumacro.h>
enum direction {
@@ -373,16 +374,11 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
}
}
-static char popc_helper[] = {
-0, 1, 1, 2, 1, 2, 2, 3,
-1, 2, 2, 3, 2, 3, 3, 4,
-};
-
int handle_popc(u32 insn, struct pt_regs *regs)
{
- u64 value;
- int ret, i, rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+ int ret, rd = ((insn >> 25) & 0x1f);
+ u64 value;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (insn & 0x2000) {
@@ -392,10 +388,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
value = fetch_reg(insn & 0x1f, regs);
}
- for (ret = 0, i = 0; i < 16; i++) {
- ret += popc_helper[value & 0xf];
- value >>= 4;
- }
+ ret = hweight64(value);
if (rd < 16) {
if (rd)
regs->u_regs[rd] = ret;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index c0220759003..0e1605697b4 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -107,7 +107,26 @@ SECTIONS
*(.sun4v_2insn_patch)
__sun4v_2insn_patch_end = .;
}
-
+ .swapper_tsb_phys_patch : {
+ __swapper_tsb_phys_patch = .;
+ *(.swapper_tsb_phys_patch)
+ __swapper_tsb_phys_patch_end = .;
+ }
+ .swapper_4m_tsb_phys_patch : {
+ __swapper_4m_tsb_phys_patch = .;
+ *(.swapper_4m_tsb_phys_patch)
+ __swapper_4m_tsb_phys_patch_end = .;
+ }
+ .popc_3insn_patch : {
+ __popc_3insn_patch = .;
+ *(.popc_3insn_patch)
+ __popc_3insn_patch_end = .;
+ }
+ .popc_6insn_patch : {
+ __popc_6insn_patch = .;
+ *(.popc_6insn_patch)
+ __popc_6insn_patch_end = .;
+ }
PERCPU_SECTION(SMP_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 7f01b8fce8b..a3fc4375a15 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -31,13 +31,13 @@ lib-$(CONFIG_SPARC64) += NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o
lib-$(CONFIG_SPARC64) += NGpatch.o NGpage.o NGbzero.o
lib-$(CONFIG_SPARC64) += NG2memcpy.o NG2copy_from_user.o NG2copy_to_user.o
-lib-$(CONFIG_SPARC64) += NG2patch.o NG2page.o
+lib-$(CONFIG_SPARC64) += NG2patch.o
lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
-lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o
+lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-y += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o
diff --git a/arch/sparc/lib/NG2page.S b/arch/sparc/lib/NG2page.S
deleted file mode 100644
index 73b6b7c72cb..00000000000
--- a/arch/sparc/lib/NG2page.S
+++ /dev/null
@@ -1,61 +0,0 @@
-/* NG2page.S: Niagara-2 optimized clear and copy page.
- *
- * Copyright (C) 2007 (davem@davemloft.net)
- */
-
-#include <asm/asi.h>
-#include <asm/page.h>
-#include <asm/visasm.h>
-
- .text
- .align 32
-
- /* This is heavily simplified from the sun4u variants
- * because Niagara-2 does not have any D-cache aliasing issues.
- */
-NG2copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
- prefetch [%o1 + 0x00], #one_read
- prefetch [%o1 + 0x40], #one_read
- VISEntryHalf
- set PAGE_SIZE, %g7
- sub %o0, %o1, %g3
-1: stxa %g0, [%o1 + %g3] ASI_BLK_INIT_QUAD_LDD_P
- subcc %g7, 64, %g7
- ldda [%o1] ASI_BLK_P, %f0
- stda %f0, [%o1 + %g3] ASI_BLK_P
- add %o1, 64, %o1
- bne,pt %xcc, 1b
- prefetch [%o1 + 0x40], #one_read
- membar #Sync
- VISExitHalf
- retl
- nop
-
-#define BRANCH_ALWAYS 0x10680000
-#define NOP 0x01000000
-#define NG_DO_PATCH(OLD, NEW) \
- sethi %hi(NEW), %g1; \
- or %g1, %lo(NEW), %g1; \
- sethi %hi(OLD), %g2; \
- or %g2, %lo(OLD), %g2; \
- sub %g1, %g2, %g1; \
- sethi %hi(BRANCH_ALWAYS), %g3; \
- sll %g1, 11, %g1; \
- srl %g1, 11 + 2, %g1; \
- or %g3, %lo(BRANCH_ALWAYS), %g3; \
- or %g3, %g1, %g3; \
- stw %g3, [%g2]; \
- sethi %hi(NOP), %g3; \
- or %g3, %lo(NOP), %g3; \
- stw %g3, [%g2 + 0x4]; \
- flush %g2;
-
- .globl niagara2_patch_pageops
- .type niagara2_patch_pageops,#function
-niagara2_patch_pageops:
- NG_DO_PATCH(copy_user_page, NG2copy_user_page)
- NG_DO_PATCH(_clear_page, NGclear_page)
- NG_DO_PATCH(clear_user_page, NGclear_user_page)
- retl
- nop
- .size niagara2_patch_pageops,.-niagara2_patch_pageops
diff --git a/arch/sparc/lib/NGpage.S b/arch/sparc/lib/NGpage.S
index 428920de05b..b9e790b9c6b 100644
--- a/arch/sparc/lib/NGpage.S
+++ b/arch/sparc/lib/NGpage.S
@@ -16,55 +16,91 @@
*/
NGcopy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
- prefetch [%o1 + 0x00], #one_read
- mov 8, %g1
- mov 16, %g2
- mov 24, %g3
+ save %sp, -192, %sp
+ rd %asi, %g3
+ wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
set PAGE_SIZE, %g7
+ prefetch [%i1 + 0x00], #one_read
+ prefetch [%i1 + 0x40], #one_read
-1: ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2
- ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4
- prefetch [%o1 + 0x40], #one_read
- add %o1, 32, %o1
- stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
- stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
- ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2
- stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
- stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
- ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4
- add %o1, 32, %o1
- add %o0, 32, %o0
- stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
- stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
- stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
- stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
- subcc %g7, 64, %g7
+1: prefetch [%i1 + 0x80], #one_read
+ prefetch [%i1 + 0xc0], #one_read
+ ldda [%i1 + 0x00] %asi, %o2
+ ldda [%i1 + 0x10] %asi, %o4
+ ldda [%i1 + 0x20] %asi, %l2
+ ldda [%i1 + 0x30] %asi, %l4
+ stxa %o2, [%i0 + 0x00] %asi
+ stxa %o3, [%i0 + 0x08] %asi
+ stxa %o4, [%i0 + 0x10] %asi
+ stxa %o5, [%i0 + 0x18] %asi
+ stxa %l2, [%i0 + 0x20] %asi
+ stxa %l3, [%i0 + 0x28] %asi
+ stxa %l4, [%i0 + 0x30] %asi
+ stxa %l5, [%i0 + 0x38] %asi
+ ldda [%i1 + 0x40] %asi, %o2
+ ldda [%i1 + 0x50] %asi, %o4
+ ldda [%i1 + 0x60] %asi, %l2
+ ldda [%i1 + 0x70] %asi, %l4
+ stxa %o2, [%i0 + 0x40] %asi
+ stxa %o3, [%i0 + 0x48] %asi
+ stxa %o4, [%i0 + 0x50] %asi
+ stxa %o5, [%i0 + 0x58] %asi
+ stxa %l2, [%i0 + 0x60] %asi
+ stxa %l3, [%i0 + 0x68] %asi
+ stxa %l4, [%i0 + 0x70] %asi
+ stxa %l5, [%i0 + 0x78] %asi
+ add %i1, 128, %i1
+ subcc %g7, 128, %g7
bne,pt %xcc, 1b
- add %o0, 32, %o0
+ add %i0, 128, %i0
+ wr %g3, 0x0, %asi
membar #Sync
- retl
- nop
+ ret
+ restore
- .globl NGclear_page, NGclear_user_page
+ .align 32
NGclear_page: /* %o0=dest */
NGclear_user_page: /* %o0=dest, %o1=vaddr */
- mov 8, %g1
- mov 16, %g2
- mov 24, %g3
+ rd %asi, %g3
+ wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
set PAGE_SIZE, %g7
-1: stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
- stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
- stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
- stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
- add %o0, 32, %o0
- stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
- stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
- stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
- stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
- subcc %g7, 64, %g7
+1: stxa %g0, [%o0 + 0x00] %asi
+ stxa %g0, [%o0 + 0x08] %asi
+ stxa %g0, [%o0 + 0x10] %asi
+ stxa %g0, [%o0 + 0x18] %asi
+ stxa %g0, [%o0 + 0x20] %asi
+ stxa %g0, [%o0 + 0x28] %asi
+ stxa %g0, [%o0 + 0x30] %asi
+ stxa %g0, [%o0 + 0x38] %asi
+ stxa %g0, [%o0 + 0x40] %asi
+ stxa %g0, [%o0 + 0x48] %asi
+ stxa %g0, [%o0 + 0x50] %asi
+ stxa %g0, [%o0 + 0x58] %asi
+ stxa %g0, [%o0 + 0x60] %asi
+ stxa %g0, [%o0 + 0x68] %asi
+ stxa %g0, [%o0 + 0x70] %asi
+ stxa %g0, [%o0 + 0x78] %asi
+ stxa %g0, [%o0 + 0x80] %asi
+ stxa %g0, [%o0 + 0x88] %asi
+ stxa %g0, [%o0 + 0x90] %asi
+ stxa %g0, [%o0 + 0x98] %asi
+ stxa %g0, [%o0 + 0xa0] %asi
+ stxa %g0, [%o0 + 0xa8] %asi
+ stxa %g0, [%o0 + 0xb0] %asi
+ stxa %g0, [%o0 + 0xb8] %asi
+ stxa %g0, [%o0 + 0xc0] %asi
+ stxa %g0, [%o0 + 0xc8] %asi
+ stxa %g0, [%o0 + 0xd0] %asi
+ stxa %g0, [%o0 + 0xd8] %asi
+ stxa %g0, [%o0 + 0xe0] %asi
+ stxa %g0, [%o0 + 0xe8] %asi
+ stxa %g0, [%o0 + 0xf0] %asi
+ stxa %g0, [%o0 + 0xf8] %asi
+ subcc %g7, 256, %g7
bne,pt %xcc, 1b
- add %o0, 32, %o0
+ add %o0, 256, %o0
+ wr %g3, 0x0, %asi
membar #Sync
retl
nop
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 8600eb2461b..1d32b54089a 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -65,7 +65,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u)
if (ret != u)
v->counter += a;
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
- return ret != u;
+ return ret;
}
EXPORT_SYMBOL(__atomic_add_unless);
diff --git a/arch/sparc/lib/ffs.S b/arch/sparc/lib/ffs.S
new file mode 100644
index 00000000000..b39389f6989
--- /dev/null
+++ b/arch/sparc/lib/ffs.S
@@ -0,0 +1,84 @@
+#include <linux/linkage.h>
+
+ .register %g2,#scratch
+
+ .text
+ .align 32
+
+ENTRY(ffs)
+ brnz,pt %o0, 1f
+ mov 1, %o1
+ retl
+ clr %o0
+ nop
+ nop
+ENTRY(__ffs)
+ sllx %o0, 32, %g1 /* 1 */
+ srlx %o0, 32, %g2
+
+ clr %o1 /* 2 */
+ movrz %g1, %g2, %o0
+
+ movrz %g1, 32, %o1 /* 3 */
+1: clr %o2
+
+ sllx %o0, (64 - 16), %g1 /* 4 */
+ srlx %o0, 16, %g2
+
+ movrz %g1, %g2, %o0 /* 5 */
+ clr %o3
+
+ movrz %g1, 16, %o2 /* 6 */
+ clr %o4
+
+ and %o0, 0xff, %g1 /* 7 */
+ srlx %o0, 8, %g2
+
+ movrz %g1, %g2, %o0 /* 8 */
+ clr %o5
+
+ movrz %g1, 8, %o3 /* 9 */
+ add %o2, %o1, %o2
+
+ and %o0, 0xf, %g1 /* 10 */
+ srlx %o0, 4, %g2
+
+ movrz %g1, %g2, %o0 /* 11 */
+ add %o2, %o3, %o2
+
+ movrz %g1, 4, %o4 /* 12 */
+
+ and %o0, 0x3, %g1 /* 13 */
+ srlx %o0, 2, %g2
+
+ movrz %g1, %g2, %o0 /* 14 */
+ add %o2, %o4, %o2
+
+ movrz %g1, 2, %o5 /* 15 */
+
+ and %o0, 0x1, %g1 /* 16 */
+
+ add %o2, %o5, %o2 /* 17 */
+ xor %g1, 0x1, %g1
+
+ retl /* 18 */
+ add %o2, %g1, %o0
+ENDPROC(ffs)
+ENDPROC(__ffs)
+
+ .section .popc_6insn_patch, "ax"
+ .word ffs
+ brz,pn %o0, 98f
+ neg %o0, %g1
+ xnor %o0, %g1, %o1
+ popc %o1, %o0
+98: retl
+ nop
+ .word __ffs
+ neg %o0, %g1
+ xnor %o0, %g1, %o1
+ popc %o1, %o0
+ retl
+ sub %o0, 1, %o0
+ nop
+ .previous
diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S
new file mode 100644
index 00000000000..95414e0a680
--- /dev/null
+++ b/arch/sparc/lib/hweight.S
@@ -0,0 +1,51 @@
+#include <linux/linkage.h>
+
+ .text
+ .align 32
+ENTRY(__arch_hweight8)
+ ba,pt %xcc, __sw_hweight8
+ nop
+ nop
+ENDPROC(__arch_hweight8)
+ .section .popc_3insn_patch, "ax"
+ .word __arch_hweight8
+ sllx %o0, 64-8, %g1
+ retl
+ popc %g1, %o0
+ .previous
+
+ENTRY(__arch_hweight16)
+ ba,pt %xcc, __sw_hweight16
+ nop
+ nop
+ENDPROC(__arch_hweight16)
+ .section .popc_3insn_patch, "ax"
+ .word __arch_hweight16
+ sllx %o0, 64-16, %g1
+ retl
+ popc %g1, %o0
+ .previous
+
+ENTRY(__arch_hweight32)
+ ba,pt %xcc, __sw_hweight32
+ nop
+ nop
+ENDPROC(__arch_hweight32)
+ .section .popc_3insn_patch, "ax"
+ .word __arch_hweight32
+ sllx %o0, 64-32, %g1
+ retl
+ popc %g1, %o0
+ .previous
+
+ENTRY(__arch_hweight64)
+ ba,pt %xcc, __sw_hweight64
+ nop
+ nop
+ENDPROC(__arch_hweight64)
+ .section .popc_3insn_patch, "ax"
+ .word __arch_hweight64
+ retl
+ popc %o0, %o0
+ nop
+ .previous
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 3fd8e18bed8..581531dbc8b 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1597,6 +1597,44 @@ static void __init tsb_phys_patch(void)
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
+{
+ pa >>= KTSB_PHYS_SHIFT;
+
+ while (start < end) {
+ unsigned int *ia = (unsigned int *)(unsigned long)*start;
+
+ ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
+ __asm__ __volatile__("flush %0" : : "r" (ia));
+
+ ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
+ __asm__ __volatile__("flush %0" : : "r" (ia + 1));
+
+ start++;
+ }
+}
+
+static void ktsb_phys_patch(void)
+{
+ extern unsigned int __swapper_tsb_phys_patch;
+ extern unsigned int __swapper_tsb_phys_patch_end;
+ unsigned long ktsb_pa;
+
+ ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
+ patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
+ &__swapper_tsb_phys_patch_end, ktsb_pa);
+#ifndef CONFIG_DEBUG_PAGEALLOC
+ {
+ extern unsigned int __swapper_4m_tsb_phys_patch;
+ extern unsigned int __swapper_4m_tsb_phys_patch_end;
+ ktsb_pa = (kern_base +
+ ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
+ patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
+ &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
+ }
+#endif
+}
+
static void __init sun4v_ktsb_init(void)
{
unsigned long ktsb_pa;
@@ -1716,8 +1754,10 @@ void __init paging_init(void)
sun4u_pgprot_init();
if (tlb_type == cheetah_plus ||
- tlb_type == hypervisor)
+ tlb_type == hypervisor) {
tsb_phys_patch();
+ ktsb_phys_patch();
+ }
if (tlb_type == hypervisor) {
sun4v_patch_tlb_handlers();
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 0249b8b4db5..b30f71ac0d0 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -12,6 +12,7 @@ config TILE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
select SYS_HYPERVISOR
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG if !M386
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 849ab2fa1f5..aec60dc0600 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -2,3 +2,41 @@ include include/asm-generic/Kbuild.asm
header-y += ucontext.h
header-y += hardwall.h
+
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipc.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += local.h
+generic-y += module.h
+generic-y += msgbuf.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += xor.h
diff --git a/arch/tile/include/asm/bug.h b/arch/tile/include/asm/bug.h
deleted file mode 100644
index b12fd89e42e..00000000000
--- a/arch/tile/include/asm/bug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bug.h>
diff --git a/arch/tile/include/asm/bugs.h b/arch/tile/include/asm/bugs.h
deleted file mode 100644
index 61791e1ad9f..00000000000
--- a/arch/tile/include/asm/bugs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bugs.h>
diff --git a/arch/tile/include/asm/cputime.h b/arch/tile/include/asm/cputime.h
deleted file mode 100644
index 6d68ad7e0ea..00000000000
--- a/arch/tile/include/asm/cputime.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/cputime.h>
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h
deleted file mode 100644
index f0a4c256403..00000000000
--- a/arch/tile/include/asm/device.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/device.h>
diff --git a/arch/tile/include/asm/div64.h b/arch/tile/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb2..00000000000
--- a/arch/tile/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/tile/include/asm/emergency-restart.h b/arch/tile/include/asm/emergency-restart.h
deleted file mode 100644
index 3711bd9d50b..00000000000
--- a/arch/tile/include/asm/emergency-restart.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/emergency-restart.h>
diff --git a/arch/tile/include/asm/errno.h b/arch/tile/include/asm/errno.h
deleted file mode 100644
index 4c82b503d92..00000000000
--- a/arch/tile/include/asm/errno.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/errno.h>
diff --git a/arch/tile/include/asm/fb.h b/arch/tile/include/asm/fb.h
deleted file mode 100644
index 3a4988e8df4..00000000000
--- a/arch/tile/include/asm/fb.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fb.h>
diff --git a/arch/tile/include/asm/fcntl.h b/arch/tile/include/asm/fcntl.h
deleted file mode 100644
index 46ab12db573..00000000000
--- a/arch/tile/include/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h
index 51537ff9265..c66f7933bea 100644
--- a/arch/tile/include/asm/fixmap.h
+++ b/arch/tile/include/asm/fixmap.h
@@ -75,12 +75,6 @@ extern void __set_fixmap(enum fixed_addresses idx,
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-
#define clear_fixmap(idx) \
__set_fixmap(idx, 0, __pgprot(0))
diff --git a/arch/tile/include/asm/ioctl.h b/arch/tile/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe..00000000000
--- a/arch/tile/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/tile/include/asm/ioctls.h b/arch/tile/include/asm/ioctls.h
deleted file mode 100644
index ec34c760665..00000000000
--- a/arch/tile/include/asm/ioctls.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctls.h>
diff --git a/arch/tile/include/asm/ipc.h b/arch/tile/include/asm/ipc.h
deleted file mode 100644
index a46e3d9c2a3..00000000000
--- a/arch/tile/include/asm/ipc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipc.h>
diff --git a/arch/tile/include/asm/ipcbuf.h b/arch/tile/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d..00000000000
--- a/arch/tile/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/tile/include/asm/irq_regs.h b/arch/tile/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b7027..00000000000
--- a/arch/tile/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/tile/include/asm/kdebug.h b/arch/tile/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b03766..00000000000
--- a/arch/tile/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/tile/include/asm/local.h b/arch/tile/include/asm/local.h
deleted file mode 100644
index c11c530f74d..00000000000
--- a/arch/tile/include/asm/local.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local.h>
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h
deleted file mode 100644
index 1e4b79fe858..00000000000
--- a/arch/tile/include/asm/module.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/module.h>
diff --git a/arch/tile/include/asm/msgbuf.h b/arch/tile/include/asm/msgbuf.h
deleted file mode 100644
index 809134c644a..00000000000
--- a/arch/tile/include/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
diff --git a/arch/tile/include/asm/mutex.h b/arch/tile/include/asm/mutex.h
deleted file mode 100644
index ff6101aa2c7..00000000000
--- a/arch/tile/include/asm/mutex.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/tile/include/asm/param.h b/arch/tile/include/asm/param.h
deleted file mode 100644
index 965d4542797..00000000000
--- a/arch/tile/include/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/tile/include/asm/parport.h b/arch/tile/include/asm/parport.h
deleted file mode 100644
index cf252af6459..00000000000
--- a/arch/tile/include/asm/parport.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/parport.h>
diff --git a/arch/tile/include/asm/poll.h b/arch/tile/include/asm/poll.h
deleted file mode 100644
index c98509d3149..00000000000
--- a/arch/tile/include/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/tile/include/asm/posix_types.h b/arch/tile/include/asm/posix_types.h
deleted file mode 100644
index 22cae6230ce..00000000000
--- a/arch/tile/include/asm/posix_types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/posix_types.h>
diff --git a/arch/tile/include/asm/resource.h b/arch/tile/include/asm/resource.h
deleted file mode 100644
index 04bc4db8921..00000000000
--- a/arch/tile/include/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/resource.h>
diff --git a/arch/tile/include/asm/scatterlist.h b/arch/tile/include/asm/scatterlist.h
deleted file mode 100644
index 35d786fe93a..00000000000
--- a/arch/tile/include/asm/scatterlist.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/scatterlist.h>
diff --git a/arch/tile/include/asm/sembuf.h b/arch/tile/include/asm/sembuf.h
deleted file mode 100644
index 7673b83cfef..00000000000
--- a/arch/tile/include/asm/sembuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sembuf.h>
diff --git a/arch/tile/include/asm/serial.h b/arch/tile/include/asm/serial.h
deleted file mode 100644
index a0cb0caff15..00000000000
--- a/arch/tile/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/tile/include/asm/shmbuf.h b/arch/tile/include/asm/shmbuf.h
deleted file mode 100644
index 83c05fc2de3..00000000000
--- a/arch/tile/include/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
diff --git a/arch/tile/include/asm/shmparam.h b/arch/tile/include/asm/shmparam.h
deleted file mode 100644
index 93f30deb95d..00000000000
--- a/arch/tile/include/asm/shmparam.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmparam.h>
diff --git a/arch/tile/include/asm/socket.h b/arch/tile/include/asm/socket.h
deleted file mode 100644
index 6b71384b9d8..00000000000
--- a/arch/tile/include/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/tile/include/asm/sockios.h b/arch/tile/include/asm/sockios.h
deleted file mode 100644
index def6d4746ee..00000000000
--- a/arch/tile/include/asm/sockios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sockios.h>
diff --git a/arch/tile/include/asm/statfs.h b/arch/tile/include/asm/statfs.h
deleted file mode 100644
index 0b91fe198c2..00000000000
--- a/arch/tile/include/asm/statfs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/statfs.h>
diff --git a/arch/tile/include/asm/termbits.h b/arch/tile/include/asm/termbits.h
deleted file mode 100644
index 3935b106de7..00000000000
--- a/arch/tile/include/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
diff --git a/arch/tile/include/asm/termios.h b/arch/tile/include/asm/termios.h
deleted file mode 100644
index 280d78a9d96..00000000000
--- a/arch/tile/include/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
diff --git a/arch/tile/include/asm/types.h b/arch/tile/include/asm/types.h
deleted file mode 100644
index b9e79bc580d..00000000000
--- a/arch/tile/include/asm/types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/types.h>
diff --git a/arch/tile/include/asm/ucontext.h b/arch/tile/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9f30f..00000000000
--- a/arch/tile/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/tile/include/asm/xor.h b/arch/tile/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b1..00000000000
--- a/arch/tile/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/tile/include/hv/drv_srom_intf.h b/arch/tile/include/hv/drv_srom_intf.h
new file mode 100644
index 00000000000..6395faa6d9e
--- /dev/null
+++ b/arch/tile/include/hv/drv_srom_intf.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file drv_srom_intf.h
+ * Interface definitions for the SPI Flash ROM driver.
+ */
+
+#ifndef _SYS_HV_INCLUDE_DRV_SROM_INTF_H
+#define _SYS_HV_INCLUDE_DRV_SROM_INTF_H
+
+/** Read this offset to get the total device size. */
+#define SROM_TOTAL_SIZE_OFF 0xF0000000
+
+/** Read this offset to get the device sector size. */
+#define SROM_SECTOR_SIZE_OFF 0xF0000004
+
+/** Read this offset to get the device page size. */
+#define SROM_PAGE_SIZE_OFF 0xF0000008
+
+/** Write this offset to flush any pending writes. */
+#define SROM_FLUSH_OFF 0xF1000000
+
+/** Write this offset, plus the byte offset of the start of a sector, to
+ * erase a sector. Any write data is ignored, but there must be at least
+ * one byte of write data. Only applies when the driver is in MTD mode.
+ */
+#define SROM_ERASE_OFF 0xF2000000
+
+#endif /* _SYS_HV_INCLUDE_DRV_SROM_INTF_H */
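The header above only publishes magic offsets; a client learns the SROM geometry by issuing reads at those offsets through whatever access path the hypervisor driver exposes. A minimal stand-alone sketch of that query sequence follows; srom_read() is a hypothetical stand-in returning canned values, not part of the real interface.

/*
 * Stand-alone sketch of querying SROM geometry via the special offsets
 * defined in drv_srom_intf.h.  srom_read() is a hypothetical accessor,
 * not the real hypervisor read path.
 */
#include <stdint.h>
#include <stdio.h>

#define SROM_TOTAL_SIZE_OFF  0xF0000000
#define SROM_SECTOR_SIZE_OFF 0xF0000004
#define SROM_PAGE_SIZE_OFF   0xF0000008

static uint32_t srom_read(uint32_t off)        /* hypothetical accessor */
{
	switch (off) {
	case SROM_TOTAL_SIZE_OFF:  return 2 * 1024 * 1024;  /* example: 2 MiB part */
	case SROM_SECTOR_SIZE_OFF: return 64 * 1024;
	case SROM_PAGE_SIZE_OFF:   return 256;
	default:                   return 0;
	}
}

int main(void)
{
	printf("total %u, sector %u, page %u\n",
	       srom_read(SROM_TOTAL_SIZE_OFF),
	       srom_read(SROM_SECTOR_SIZE_OFF),
	       srom_read(SROM_PAGE_SIZE_OFF));
	return 0;
}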
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index c4be58cc5d5..f6f50f2a5e3 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -78,7 +78,6 @@ static struct clocksource cycle_counter_cs = {
.rating = 300,
.read = clocksource_get_cycles,
.mask = CLOCKSOURCE_MASK(64),
- .shift = 22, /* typical value, e.g. x86 tsc uses this */
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -91,8 +90,6 @@ void __init setup_clock(void)
cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
sched_clock_mult =
clocksource_hz2mult(cycles_per_sec, SCHED_CLOCK_SHIFT);
- cycle_counter_cs.mult =
- clocksource_hz2mult(cycles_per_sec, cycle_counter_cs.shift);
}
void __init calibrate_delay(void)
@@ -107,7 +104,7 @@ void __init calibrate_delay(void)
void __init time_init(void)
{
/* Initialize and register the clock source. */
- clocksource_register(&cycle_counter_cs);
+ clocksource_register_hz(&cycle_counter_cs, cycles_per_sec);
/* Start up the tile-timer interrupt source on the boot cpu. */
setup_tile_timer();
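With clocksource_register_hz() the core derives the mult/shift pair from the cycle rate, replacing the removed hard-coded shift of 22 and the manual clocksource_hz2mult() call. A rough stand-alone sketch of the underlying conversion, ns = (cycles * mult) >> shift, using example numbers only:

/*
 * Rough illustration of the mult/shift arithmetic that
 * clocksource_register_hz() now handles internally.  The rate, shift
 * and cycle count below are examples, not what the core will pick.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	/* mult = round((NSEC_PER_SEC << shift) / hz) */
	uint64_t tmp = (NSEC_PER_SEC << shift) + hz / 2;
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	uint32_t hz = 700000000;            /* e.g. a 700 MHz cycle counter */
	uint32_t shift = 22;                /* the old hard-coded shift */
	uint32_t mult = hz2mult(hz, shift);
	uint64_t cycles = 700000;           /* 1 ms worth of cycles */

	printf("mult=%u -> %llu ns\n", mult,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}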
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 4e10c402302..7309988c979 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -836,8 +836,7 @@ void __init mem_init(void)
#endif
#ifdef CONFIG_FLATMEM
- if (!mem_map)
- BUG();
+ BUG_ON(!mem_map);
#endif
#ifdef CONFIG_HIGHMEM
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7cf916fc1ce..6a47bb22657 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -72,6 +72,7 @@ config X86
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_BPF_JIT if (X86_64 && NET)
select CLKEVT_I8253
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS)
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index d02804d650c..d8e8eefbe24 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -40,8 +40,6 @@
#include <linux/compiler.h>
#include <asm/page.h>
-#include <xen/xen.h>
-
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
@@ -334,6 +332,7 @@ extern void fixup_early_ioremap(void);
extern bool is_early_ioremap_ptep(pte_t *ptep);
#ifdef CONFIG_XEN
+#include <xen/xen.h>
struct bio_vec;
extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 219371546af..0d1171c9772 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -751,8 +751,6 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
:: "a" (eax), "c" (ecx));
}
-extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
-
extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 5812404a0d4..f50e7fb2a20 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -149,6 +149,29 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
+{
+ if (!need_resched()) {
+ if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+ clflush((void *)&current_thread_info()->flags);
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __mwait(ax, cx);
+ }
+}
+
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e1ba8cb24e4..e7e3b019c43 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -438,29 +438,6 @@ void cpu_idle_wait(void)
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
- if (!need_resched()) {
- if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
- clflush((void *)&current_thread_info()->flags);
-
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(ax, cx);
- }
-}
-
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a3d0dc59067..7a3b65107a2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -38,6 +38,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
+#include <linux/cpuidle.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -109,7 +110,8 @@ void cpu_idle(void)
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
- pm_idle();
+ if (cpuidle_idle_call())
+ pm_idle();
start_critical_timings();
}
tick_nohz_restart_sched_tick();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ca6f7ab8df3..f693e44e1bf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
+#include <linux/cpuidle.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -136,7 +137,8 @@ void cpu_idle(void)
enter_idle();
/* Don't trace irqs off for idle */
stop_critical_timings();
- pm_idle();
+ if (cpuidle_idle_call())
+ pm_idle();
start_critical_timings();
/* In many cases the interrupt that ended idle
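Both idle loops now try the cpuidle framework first and fall back to pm_idle() only when cpuidle_idle_call() returns non-zero (no driver registered, or cpuidle disabled, as the Xen change further down in this diff does via disable_cpuidle()). A toy stand-alone model of that fallback contract, with stubbed functions that merely mimic the kernel names:

/*
 * Toy model of the new idle selection: a non-zero return from
 * cpuidle_idle_call() means "not handled", and the caller falls back
 * to the legacy pm_idle routine.  All functions here are stand-ins.
 */
#include <stdio.h>

static int cpuidle_disabled;                 /* what disable_cpuidle() models */

static int cpuidle_idle_call(void)
{
	if (cpuidle_disabled)
		return -1;                   /* not handled, use fallback */
	printf("entered a cpuidle-selected state\n");
	return 0;
}

static void pm_idle(void)
{
	printf("fell back to pm_idle (e.g. halt)\n");
}

int main(void)
{
	if (cpuidle_idle_call())
		pm_idle();

	cpuidle_disabled = 1;
	if (cpuidle_idle_call())
		pm_idle();
	return 0;
}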
diff --git a/arch/x86/platform/mrst/Makefile b/arch/x86/platform/mrst/Makefile
index f61ccdd4934..1ea38775a6d 100644
--- a/arch/x86/platform/mrst/Makefile
+++ b/arch/x86/platform/mrst/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_X86_MRST) += mrst.o
obj-$(CONFIG_X86_MRST) += vrtc.o
obj-$(CONFIG_EARLY_PRINTK_MRST) += early_printk_mrst.o
+obj-$(CONFIG_X86_MRST) += pmu.o
diff --git a/arch/x86/platform/mrst/pmu.c b/arch/x86/platform/mrst/pmu.c
new file mode 100644
index 00000000000..9281da7d91b
--- /dev/null
+++ b/arch/x86/platform/mrst/pmu.c
@@ -0,0 +1,817 @@
+/*
+ * mrst/pmu.c - driver for MRST Power Management Unit
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/sfi.h>
+#include <asm/intel_scu_ipc.h>
+#include "pmu.h"
+
+#define IPCMSG_FW_REVISION 0xF4
+
+struct mrst_device {
+ u16 pci_dev_num; /* DEBUG only */
+ u16 lss;
+ u16 latest_request;
+ unsigned int pci_state_counts[PCI_D3cold + 1]; /* DEBUG only */
+};
+
+/*
+ * complete list of MRST PCI devices
+ */
+static struct mrst_device mrst_devs[] = {
+/* 0 */ { 0x0800, LSS_SPI0 }, /* Moorestown SPI Ctrl 0 */
+/* 1 */ { 0x0801, LSS_SPI1 }, /* Moorestown SPI Ctrl 1 */
+/* 2 */ { 0x0802, LSS_I2C0 }, /* Moorestown I2C 0 */
+/* 3 */ { 0x0803, LSS_I2C1 }, /* Moorestown I2C 1 */
+/* 4 */ { 0x0804, LSS_I2C2 }, /* Moorestown I2C 2 */
+/* 5 */ { 0x0805, LSS_KBD }, /* Moorestown Keyboard Ctrl */
+/* 6 */ { 0x0806, LSS_USB_HC }, /* Moorestown USB Ctrl */
+/* 7 */ { 0x0807, LSS_SD_HC0 }, /* Moorestown SD Host Ctrl 0 */
+/* 8 */ { 0x0808, LSS_SD_HC1 }, /* Moorestown SD Host Ctrl 1 */
+/* 9 */ { 0x0809, LSS_NAND }, /* Moorestown NAND Ctrl */
+/* 10 */ { 0x080a, LSS_AUDIO }, /* Moorestown Audio Ctrl */
+/* 11 */ { 0x080b, LSS_IMAGING }, /* Moorestown ISP */
+/* 12 */ { 0x080c, LSS_SECURITY }, /* Moorestown Security Controller */
+/* 13 */ { 0x080d, LSS_DISPLAY }, /* Moorestown External Displays */
+/* 14 */ { 0x080e, 0 }, /* Moorestown SCU IPC */
+/* 15 */ { 0x080f, LSS_GPIO }, /* Moorestown GPIO Controller */
+/* 16 */ { 0x0810, 0 }, /* Moorestown Power Management Unit */
+/* 17 */ { 0x0811, LSS_USB_OTG }, /* Moorestown OTG Ctrl */
+/* 18 */ { 0x0812, LSS_SPI2 }, /* Moorestown SPI Ctrl 2 */
+/* 19 */ { 0x0813, 0 }, /* Moorestown SC DMA */
+/* 20 */ { 0x0814, LSS_AUDIO_LPE }, /* Moorestown LPE DMA */
+/* 21 */ { 0x0815, LSS_AUDIO_SSP }, /* Moorestown SSP0 */
+
+/* 22 */ { 0x084F, LSS_SD_HC2 }, /* Moorestown SD Host Ctrl 2 */
+
+/* 23 */ { 0x4102, 0 }, /* Lincroft */
+/* 24 */ { 0x4110, 0 }, /* Lincroft */
+};
+
+/* n.b. We ignore PCI-id 0x815 in LSS9 b/c MeeGo has no driver for it */
+static u16 mrst_lss9_pci_ids[] = {0x080a, 0x0814, 0};
+static u16 mrst_lss10_pci_ids[] = {0x0800, 0x0801, 0x0802, 0x0803,
+ 0x0804, 0x0805, 0x080f, 0};
+
+/* handle concurrent SMP invocations of pmu_pci_set_power_state() */
+static spinlock_t mrst_pmu_power_state_lock;
+
+static unsigned int wake_counters[MRST_NUM_LSS]; /* DEBUG only */
+static unsigned int pmu_irq_stats[INT_INVALID + 1]; /* DEBUG only */
+
+static int graphics_is_off;
+static int lss_s0i3_enabled;
+static bool mrst_pmu_s0i3_enable;
+
+/* debug counters */
+static u32 pmu_wait_ready_calls;
+static u32 pmu_wait_ready_udelays;
+static u32 pmu_wait_ready_udelays_max;
+static u32 pmu_wait_done_calls;
+static u32 pmu_wait_done_udelays;
+static u32 pmu_wait_done_udelays_max;
+static u32 pmu_set_power_state_entry;
+static u32 pmu_set_power_state_send_cmd;
+
+static struct mrst_device *pci_id_2_mrst_dev(u16 pci_dev_num)
+{
+ int index = 0;
+
+ if ((pci_dev_num >= 0x0800) && (pci_dev_num <= 0x815))
+ index = pci_dev_num - 0x800;
+ else if (pci_dev_num == 0x084F)
+ index = 22;
+ else if (pci_dev_num == 0x4102)
+ index = 23;
+ else if (pci_dev_num == 0x4110)
+ index = 24;
+
+ if (pci_dev_num != mrst_devs[index].pci_dev_num) {
+ WARN_ONCE(1, FW_BUG "Unknown PCI device 0x%04X\n", pci_dev_num);
+ return 0;
+ }
+
+ return &mrst_devs[index];
+}
+
+/**
+ * mrst_pmu_invalid_cstates
+ * @dev: cpuidle_device
+ *
+ * Certain states are not appropriate for governor to pick in some cases.
+ * This function will be called as cpuidle_device's prepare callback and
+ * thus tells governor to ignore such states when selecting the next state
+ * to enter.
+ */
+
+#define IDLE_STATE4_IS_C6 4
+#define IDLE_STATE5_IS_S0I3 5
+
+int mrst_pmu_invalid_cstates(void)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * Demote to C4 if the PMU is busy.
+ * Since LSS changes leave the busy bit clear...
+ * busy means either the PMU is waiting for an ACK-C6 that
+ * isn't coming due to an MWAIT that returned immediately;
+ * or we returned from S0i3 successfully, and the PMU
+ * is not done sending us interrupts.
+ */
+ if (pmu_read_busy_status())
+ return 1 << IDLE_STATE4_IS_C6 | 1 << IDLE_STATE5_IS_S0I3;
+
+ /*
+ * Disallow S0i3 if: PMU is not initialized, or CPU1 is active,
+ * or if device LSS is insufficient, or the GPU is active,
+ * or if it has been explicitly disabled.
+ */
+ if (!pmu_reg || !cpumask_equal(cpu_online_mask, cpumask_of(cpu)) ||
+ !lss_s0i3_enabled || !graphics_is_off || !mrst_pmu_s0i3_enable)
+ return 1 << IDLE_STATE5_IS_S0I3;
+ else
+ return 0;
+}
+
+/*
+ * pmu_update_wake_counters(): read PM_WKS, update wake_counters[]
+ * DEBUG only.
+ */
+static void pmu_update_wake_counters(void)
+{
+ int lss;
+ u32 wake_status;
+
+ wake_status = pmu_read_wks();
+
+ for (lss = 0; lss < MRST_NUM_LSS; ++lss) {
+ if (wake_status & (1 << lss))
+ wake_counters[lss]++;
+ }
+}
+
+int mrst_pmu_s0i3_entry(void)
+{
+ int status;
+
+ /* Clear any possible error conditions */
+ pmu_write_ics(0x300);
+
+ /* set wake control to current D-states */
+ pmu_write_wssc(S0I3_SSS_TARGET);
+
+ status = mrst_s0i3_entry(PM_S0I3_COMMAND, &pmu_reg->pm_cmd);
+ pmu_update_wake_counters();
+ return status;
+}
+
+/* poll for maximum of 5ms for busy bit to clear */
+static int pmu_wait_ready(void)
+{
+ int udelays;
+
+ pmu_wait_ready_calls++;
+
+ for (udelays = 0; udelays < 500; ++udelays) {
+ if (udelays > pmu_wait_ready_udelays_max)
+ pmu_wait_ready_udelays_max = udelays;
+
+ if (pmu_read_busy_status() == 0)
+ return 0;
+
+ udelay(10);
+ pmu_wait_ready_udelays++;
+ }
+
+ /*
+ * if this fires, observe
+ * /sys/kernel/debug/mrst_pmu_wait_ready_calls
+ * /sys/kernel/debug/mrst_pmu_wait_ready_udelays
+ */
+ WARN_ONCE(1, "SCU not ready for 5ms");
+ return -EBUSY;
+}
+/* poll for maximum of 50ms for busy bit to clear */
+static int pmu_wait_done(void)
+{
+ int udelays;
+
+ pmu_wait_done_calls++;
+
+ for (udelays = 0; udelays < 500; ++udelays) {
+ if (udelays > pmu_wait_done_udelays_max)
+ pmu_wait_done_udelays_max = udelays;
+
+ if (pmu_read_busy_status() == 0)
+ return 0;
+
+ udelay(100);
+ pmu_wait_done_udelays++;
+ }
+
+ /*
+ * if this fires, observe
+ * /sys/kernel/debug/mrst_pmu_wait_done_calls
+ * /sys/kernel/debug/mrst_pmu_wait_done_udelays
+ */
+ WARN_ONCE(1, "SCU not done for 50ms");
+ return -EBUSY;
+}
+
+u32 mrst_pmu_msi_is_disabled(void)
+{
+ return pmu_msi_is_disabled();
+}
+
+void mrst_pmu_enable_msi(void)
+{
+ pmu_msi_enable();
+}
+
+/**
+ * pmu_irq - pmu driver interrupt handler
+ * Context: interrupt context
+ */
+static irqreturn_t pmu_irq(int irq, void *dummy)
+{
+ union pmu_pm_ics pmu_ics;
+
+ pmu_ics.value = pmu_read_ics();
+
+ if (!pmu_ics.bits.pending)
+ return IRQ_NONE;
+
+ switch (pmu_ics.bits.cause) {
+ case INT_SPURIOUS:
+ case INT_CMD_DONE:
+ case INT_CMD_ERR:
+ case INT_WAKE_RX:
+ case INT_SS_ERROR:
+ case INT_S0IX_MISS:
+ case INT_NO_ACKC6:
+ pmu_irq_stats[pmu_ics.bits.cause]++;
+ break;
+ default:
+ pmu_irq_stats[INT_INVALID]++;
+ }
+
+ pmu_write_ics(pmu_ics.value); /* Clear pending interrupt */
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Translate PCI power management to MRST LSS D-states
+ */
+static int pci_2_mrst_state(int lss, pci_power_t pci_state)
+{
+ switch (pci_state) {
+ case PCI_D0:
+ if (SSMSK(D0i1, lss) & D0I1_ACG_SSS_TARGET)
+ return D0i1;
+ else
+ return D0;
+ case PCI_D1:
+ return D0i1;
+ case PCI_D2:
+ return D0i2;
+ case PCI_D3hot:
+ case PCI_D3cold:
+ return D0i3;
+ default:
+ WARN(1, "pci_state %d\n", pci_state);
+ return 0;
+ }
+}
+
+static int pmu_issue_command(u32 pm_ssc)
+{
+ union pmu_pm_set_cfg_cmd_t command;
+
+ if (pmu_read_busy_status()) {
+ pr_debug("pmu is busy, Operation not permitted\n");
+ return -1;
+ }
+
+ /*
+ * Enable interrupts in the PMU so that an interrupt is
+ * propagated when the IOC bit for a particular SET
+ * command is set
+ */
+
+ pmu_irq_enable();
+
+ /* Configure the sub systems for pmu2 */
+
+ pmu_write_ssc(pm_ssc);
+
+ /*
+ * Send the SET_CFG command to the PMU; it is configured
+ * for CM_IMMEDIATE mode and hence needs no trigger
+ */
+
+ command.pmu2_params.d_param.cfg_mode = CM_IMMEDIATE;
+ command.pmu2_params.d_param.cfg_delay = 0;
+ command.pmu2_params.d_param.rsvd = 0;
+
+ /* construct the command to send SET_CFG to particular PMU */
+ command.pmu2_params.d_param.cmd = SET_CFG_CMD;
+ command.pmu2_params.d_param.ioc = 0;
+ command.pmu2_params.d_param.mode_id = 0;
+ command.pmu2_params.d_param.sys_state = SYS_STATE_S0I0;
+
+ /* write the value of PM_CMD into particular PMU */
+ pr_debug("pmu command being written %x\n",
+ command.pmu_pm_set_cfg_cmd_value);
+
+ pmu_write_cmd(command.pmu_pm_set_cfg_cmd_value);
+
+ return 0;
+}
+
+static u16 pmu_min_lss_pci_req(u16 *ids, u16 pci_state)
+{
+ u16 existing_request;
+ int i;
+
+ for (i = 0; ids[i]; ++i) {
+ struct mrst_device *mrst_dev;
+
+ mrst_dev = pci_id_2_mrst_dev(ids[i]);
+ if (unlikely(!mrst_dev))
+ continue;
+
+ existing_request = mrst_dev->latest_request;
+ if (existing_request < pci_state)
+ pci_state = existing_request;
+ }
+ return pci_state;
+}
+
+/**
+ * pmu_pci_set_power_state - callback used by all PCI devices for
+ * platform-specific device power on/shutdown.
+ */
+
+int pmu_pci_set_power_state(struct pci_dev *pdev, pci_power_t pci_state)
+{
+ u32 old_sss, new_sss;
+ int status = 0;
+ struct mrst_device *mrst_dev;
+
+ pmu_set_power_state_entry++;
+
+ BUG_ON(pdev->vendor != PCI_VENDOR_ID_INTEL);
+ BUG_ON(pci_state < PCI_D0 || pci_state > PCI_D3cold);
+
+ mrst_dev = pci_id_2_mrst_dev(pdev->device);
+ if (unlikely(!mrst_dev))
+ return -ENODEV;
+
+ mrst_dev->pci_state_counts[pci_state]++; /* count invocations */
+
+ /* PMU driver calls self as part of PCI initialization, ignore */
+ if (pdev->device == PCI_DEV_ID_MRST_PMU)
+ return 0;
+
+ BUG_ON(!pmu_reg); /* SW bug if called before initialized */
+
+ spin_lock(&mrst_pmu_power_state_lock);
+
+ if (pdev->d3_delay) {
+ dev_dbg(&pdev->dev, "d3_delay %d, should be 0\n",
+ pdev->d3_delay);
+ pdev->d3_delay = 0;
+ }
+ /*
+ * If Lincroft graphics, simply remember state
+ */
+ if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY
+ && !((pdev->class & PCI_SUB_CLASS_MASK) >> 8)) {
+ if (pci_state == PCI_D0)
+ graphics_is_off = 0;
+ else
+ graphics_is_off = 1;
+ goto ret;
+ }
+
+ if (!mrst_dev->lss)
+ goto ret; /* device with no LSS */
+
+ if (mrst_dev->latest_request == pci_state)
+ goto ret; /* no change */
+
+ mrst_dev->latest_request = pci_state; /* record latest request */
+
+ /*
+ * LSS9 and LSS10 contain multiple PCI devices.
+ * Use the lowest numbered (highest power) state in the LSS
+ */
+ if (mrst_dev->lss == 9)
+ pci_state = pmu_min_lss_pci_req(mrst_lss9_pci_ids, pci_state);
+ else if (mrst_dev->lss == 10)
+ pci_state = pmu_min_lss_pci_req(mrst_lss10_pci_ids, pci_state);
+
+ status = pmu_wait_ready();
+ if (status)
+ goto ret;
+
+ old_sss = pmu_read_sss();
+ new_sss = old_sss & ~SSMSK(3, mrst_dev->lss);
+ new_sss |= SSMSK(pci_2_mrst_state(mrst_dev->lss, pci_state),
+ mrst_dev->lss);
+
+ if (new_sss == old_sss)
+ goto ret; /* nothing to do */
+
+ pmu_set_power_state_send_cmd++;
+
+ status = pmu_issue_command(new_sss);
+
+ if (unlikely(status != 0)) {
+ dev_err(&pdev->dev, "Failed to Issue a PM command\n");
+ goto ret;
+ }
+
+ if (pmu_wait_done())
+ goto ret;
+
+ lss_s0i3_enabled =
+ ((pmu_read_sss() & S0I3_SSS_TARGET) == S0I3_SSS_TARGET);
+ret:
+ spin_unlock(&mrst_pmu_power_state_lock);
+ return status;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static char *d0ix_names[] = {"D0", "D0i1", "D0i2", "D0i3"};
+
+static inline const char *d0ix_name(int state)
+{
+ return d0ix_names[(int) state];
+}
+
+static int debug_mrst_pmu_show(struct seq_file *s, void *unused)
+{
+ struct pci_dev *pdev = NULL;
+ u32 cur_pmsss;
+ int lss;
+
+ seq_printf(s, "0x%08X D0I1_ACG_SSS_TARGET\n", D0I1_ACG_SSS_TARGET);
+
+ cur_pmsss = pmu_read_sss();
+
+ seq_printf(s, "0x%08X S0I3_SSS_TARGET\n", S0I3_SSS_TARGET);
+
+ seq_printf(s, "0x%08X Current SSS ", cur_pmsss);
+ seq_printf(s, lss_s0i3_enabled ? "\n" : "[BLOCKS s0i3]\n");
+
+ if (cpumask_equal(cpu_online_mask, cpumask_of(0)))
+ seq_printf(s, "cpu0 is only cpu online\n");
+ else
+ seq_printf(s, "cpu0 is NOT only cpu online [BLOCKS S0i3]\n");
+
+ seq_printf(s, "GFX: %s\n", graphics_is_off ? "" : "[BLOCKS s0i3]");
+
+
+ for_each_pci_dev(pdev) {
+ int pos;
+ u16 pmcsr;
+ struct mrst_device *mrst_dev;
+ int i;
+
+ mrst_dev = pci_id_2_mrst_dev(pdev->device);
+
+ seq_printf(s, "%s %04x/%04X %-16.16s ",
+ dev_name(&pdev->dev),
+ pdev->vendor, pdev->device,
+ dev_driver_string(&pdev->dev));
+
+ if (unlikely(!mrst_dev)) {
+ seq_printf(s, " UNKNOWN\n");
+ continue;
+ }
+
+ if (mrst_dev->lss)
+ seq_printf(s, "LSS %2d %-4s ", mrst_dev->lss,
+ d0ix_name(((cur_pmsss >>
+ (mrst_dev->lss * 2)) & 0x3)));
+ else
+ seq_printf(s, " ");
+
+ /* PCI PM config space setting */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pos != 0) {
+ pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+ seq_printf(s, "PCI-%-4s",
+ pci_power_name(pmcsr & PCI_PM_CTRL_STATE_MASK));
+ } else {
+ seq_printf(s, " ");
+ }
+
+ seq_printf(s, " %s ", pci_power_name(mrst_dev->latest_request));
+ for (i = 0; i <= PCI_D3cold; ++i)
+ seq_printf(s, "%d ", mrst_dev->pci_state_counts[i]);
+
+ if (mrst_dev->lss) {
+ unsigned int lssmask;
+
+ lssmask = SSMSK(D0i3, mrst_dev->lss);
+
+ if ((lssmask & S0I3_SSS_TARGET) &&
+ ((lssmask & cur_pmsss) !=
+ (lssmask & S0I3_SSS_TARGET)))
+ seq_printf(s, "[BLOCKS s0i3]");
+ }
+
+ seq_printf(s, "\n");
+ }
+ seq_printf(s, "Wake Counters:\n");
+ for (lss = 0; lss < MRST_NUM_LSS; ++lss)
+ seq_printf(s, "LSS%d %d\n", lss, wake_counters[lss]);
+
+ seq_printf(s, "Interrupt Counters:\n");
+ seq_printf(s,
+ "INT_SPURIOUS \t%8u\n" "INT_CMD_DONE \t%8u\n"
+ "INT_CMD_ERR \t%8u\n" "INT_WAKE_RX \t%8u\n"
+ "INT_SS_ERROR \t%8u\n" "INT_S0IX_MISS\t%8u\n"
+ "INT_NO_ACKC6 \t%8u\n" "INT_INVALID \t%8u\n",
+ pmu_irq_stats[INT_SPURIOUS], pmu_irq_stats[INT_CMD_DONE],
+ pmu_irq_stats[INT_CMD_ERR], pmu_irq_stats[INT_WAKE_RX],
+ pmu_irq_stats[INT_SS_ERROR], pmu_irq_stats[INT_S0IX_MISS],
+ pmu_irq_stats[INT_NO_ACKC6], pmu_irq_stats[INT_INVALID]);
+
+ seq_printf(s, "mrst_pmu_wait_ready_calls %8d\n",
+ pmu_wait_ready_calls);
+ seq_printf(s, "mrst_pmu_wait_ready_udelays %8d\n",
+ pmu_wait_ready_udelays);
+ seq_printf(s, "mrst_pmu_wait_ready_udelays_max %8d\n",
+ pmu_wait_ready_udelays_max);
+ seq_printf(s, "mrst_pmu_wait_done_calls %8d\n",
+ pmu_wait_done_calls);
+ seq_printf(s, "mrst_pmu_wait_done_udelays %8d\n",
+ pmu_wait_done_udelays);
+ seq_printf(s, "mrst_pmu_wait_done_udelays_max %8d\n",
+ pmu_wait_done_udelays_max);
+ seq_printf(s, "mrst_pmu_set_power_state_entry %8d\n",
+ pmu_set_power_state_entry);
+ seq_printf(s, "mrst_pmu_set_power_state_send_cmd %8d\n",
+ pmu_set_power_state_send_cmd);
+ seq_printf(s, "SCU busy: %d\n", pmu_read_busy_status());
+
+ return 0;
+}
+
+static int debug_mrst_pmu_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_mrst_pmu_show, NULL);
+}
+
+static const struct file_operations devices_state_operations = {
+ .open = debug_mrst_pmu_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Validate SCU PCI shim PCI vendor capability byte
+ * against LSS hard-coded in mrst_devs[] above.
+ * DEBUG only.
+ */
+static void pmu_scu_firmware_debug(void)
+{
+ struct pci_dev *pdev = NULL;
+
+ for_each_pci_dev(pdev) {
+ struct mrst_device *mrst_dev;
+ u8 pci_config_lss;
+ int pos;
+
+ mrst_dev = pci_id_2_mrst_dev(pdev->device);
+ if (unlikely(!mrst_dev)) {
+ printk(KERN_ERR FW_BUG "pmu: Unknown "
+ "PCI device 0x%04X\n", pdev->device);
+ continue;
+ }
+
+ if (mrst_dev->lss == 0)
+ continue; /* no LSS in our table */
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+ if (!pos) {
+ printk(KERN_ERR FW_BUG "pmu: 0x%04X "
+ "missing PCI Vendor Capability\n",
+ pdev->device);
+ continue;
+ }
+ pci_read_config_byte(pdev, pos + 4, &pci_config_lss);
+ if (!(pci_config_lss & PCI_VENDOR_CAP_LOG_SS_MASK)) {
+ printk(KERN_ERR FW_BUG "pmu: 0x%04X "
+ "invalid PCI Vendor Capability 0x%x "
+ " expected LSS 0x%X\n",
+ pdev->device, pci_config_lss, mrst_dev->lss);
+ continue;
+ }
+ pci_config_lss &= PCI_VENDOR_CAP_LOG_ID_MASK;
+
+ if (mrst_dev->lss == pci_config_lss)
+ continue;
+
+ printk(KERN_ERR FW_BUG "pmu: 0x%04X LSS = %d, expected %d\n",
+ pdev->device, pci_config_lss, mrst_dev->lss);
+ }
+}
+
+/**
+ * pmu_probe
+ */
+static int __devinit pmu_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ int ret;
+ struct mrst_pmu_reg *pmu;
+
+ /* Init the device */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to Enable PCI device\n");
+ return ret;
+ }
+
+ ret = pci_request_regions(pdev, MRST_PMU_DRV_NAME);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto out_err1;
+ }
+
+ /* Map the memory of PMU reg base */
+ pmu = pci_iomap(pdev, 0, 0);
+ if (!pmu) {
+ dev_err(&pdev->dev, "Unable to map the PMU address space\n");
+ ret = -ENOMEM;
+ goto out_err2;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ /* /sys/kernel/debug/mrst_pmu */
+ (void) debugfs_create_file("mrst_pmu", S_IFREG | S_IRUGO,
+ NULL, NULL, &devices_state_operations);
+#endif
+ pmu_reg = pmu; /* success */
+
+ if (request_irq(pdev->irq, pmu_irq, 0, MRST_PMU_DRV_NAME, NULL)) {
+ dev_err(&pdev->dev, "Registering isr has failed\n");
+ ret = -1;
+ goto out_err3;
+ }
+
+ pmu_scu_firmware_debug();
+
+ pmu_write_wkc(S0I3_WAKE_SOURCES); /* Enable S0i3 wakeup sources */
+
+ pmu_wait_ready();
+
+ pmu_write_ssc(D0I1_ACG_SSS_TARGET); /* Enable Auto-Clock_Gating */
+ pmu_write_cmd(0x201);
+
+ spin_lock_init(&mrst_pmu_power_state_lock);
+
+ /* Enable the hardware interrupt */
+ pmu_irq_enable();
+ return 0;
+
+out_err3:
+ pci_iounmap(pdev, pmu_reg);
+ pmu_reg = NULL;
+out_err2:
+ pci_release_region(pdev, 0);
+out_err1:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void __devexit pmu_remove(struct pci_dev *pdev)
+{
+ dev_err(&pdev->dev, "Mid PM pmu_remove called\n");
+
+ /* Freeing up the irq */
+ free_irq(pdev->irq, NULL);
+
+ pci_iounmap(pdev, pmu_reg);
+ pmu_reg = NULL;
+
+ /* disable the current PCI device */
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pmu_pci_ids) = {
+ { PCI_VDEVICE(INTEL, PCI_DEV_ID_MRST_PMU), 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, pmu_pci_ids);
+
+static struct pci_driver driver = {
+ .name = MRST_PMU_DRV_NAME,
+ .id_table = pmu_pci_ids,
+ .probe = pmu_probe,
+ .remove = __devexit_p(pmu_remove),
+};
+
+/**
+ * pmu_pci_register - register the PMU driver as PCI device
+ */
+static int __init pmu_pci_register(void)
+{
+ return pci_register_driver(&driver);
+}
+
+/* Register and probe via fs_initcall() to precede device_initcall() */
+fs_initcall(pmu_pci_register);
+
+static void __exit mid_pci_cleanup(void)
+{
+ pci_unregister_driver(&driver);
+}
+
+static int ia_major;
+static int ia_minor;
+
+static int pmu_sfi_parse_oem(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+
+ sb = (struct sfi_table_simple *)table;
+ ia_major = (sb->pentry[1] >> 0) & 0xFFFF;
+ ia_minor = (sb->pentry[1] >> 16) & 0xFFFF;
+ printk(KERN_INFO "mrst_pmu: IA FW version v%x.%x\n",
+ ia_major, ia_minor);
+
+ return 0;
+}
+
+static int __init scu_fw_check(void)
+{
+ int ret;
+ u32 fw_version;
+
+ if (!pmu_reg)
+ return 0; /* this driver didn't probe-out */
+
+ sfi_table_parse("OEMB", NULL, NULL, pmu_sfi_parse_oem);
+
+ if (ia_major < 0x6005 || ia_minor < 0x1525) {
+ WARN(1, "mrst_pmu: IA FW version too old\n");
+ return -1;
+ }
+
+ ret = intel_scu_ipc_command(IPCMSG_FW_REVISION, 0, NULL, 0,
+ &fw_version, 1);
+
+ if (ret) {
+ WARN(1, "mrst_pmu: IPC FW version? %d\n", ret);
+ } else {
+ int scu_major = (fw_version >> 8) & 0xFF;
+ int scu_minor = (fw_version >> 0) & 0xFF;
+
+ printk(KERN_INFO "mrst_pmu: firmware v%x\n", fw_version);
+
+ if ((scu_major >= 0xC0) && (scu_minor >= 0x49)) {
+ printk(KERN_INFO "mrst_pmu: enabling S0i3\n");
+ mrst_pmu_s0i3_enable = true;
+ } else {
+ WARN(1, "mrst_pmu: S0i3 disabled, old firmware %X.%X",
+ scu_major, scu_minor);
+ }
+ }
+ return 0;
+}
+late_initcall(scu_fw_check);
+module_exit(mid_pci_cleanup);
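One detail of pmu_pci_set_power_state() above worth a worked example: devices sharing LSS9 or LSS10 are arbitrated by pmu_min_lss_pci_req(), which keeps the shallowest (lowest-numbered, highest-power) request among them. A stand-alone sketch of that arbitration with an invented request table:

/*
 * Stand-alone sketch of shared-LSS arbitration: among devices sharing
 * an LSS, the lowest-numbered (highest-power) PCI D-state request wins.
 * The request table below is invented for illustration.
 */
#include <stdio.h>

enum { PCI_D0, PCI_D1, PCI_D2, PCI_D3hot, PCI_D3cold };

struct req { const char *name; int latest_request; };

static struct req lss10_reqs[] = {          /* devices sharing LSS10 */
	{ "spi0", PCI_D3hot },
	{ "i2c0", PCI_D0    },              /* still active */
	{ "gpio", PCI_D2    },
};

static int min_lss_req(struct req *r, int n, int wanted)
{
	int i;

	for (i = 0; i < n; i++)
		if (r[i].latest_request < wanted)
			wanted = r[i].latest_request;
	return wanted;
}

int main(void)
{
	int eff = min_lss_req(lss10_reqs, 3, PCI_D3hot);

	printf("effective LSS10 request: D%d\n", eff);  /* D0 wins */
	return 0;
}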
diff --git a/arch/x86/platform/mrst/pmu.h b/arch/x86/platform/mrst/pmu.h
new file mode 100644
index 00000000000..bfbfe64b167
--- /dev/null
+++ b/arch/x86/platform/mrst/pmu.h
@@ -0,0 +1,234 @@
+/*
+ * mrst/pmu.h - private definitions for MRST Power Management Unit mrst/pmu.c
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _MRST_PMU_H_
+#define _MRST_PMU_H_
+
+#define PCI_DEV_ID_MRST_PMU 0x0810
+#define MRST_PMU_DRV_NAME "mrst_pmu"
+#define PCI_SUB_CLASS_MASK 0xFF00
+
+#define PCI_VENDOR_CAP_LOG_ID_MASK 0x7F
+#define PCI_VENDOR_CAP_LOG_SS_MASK 0x80
+
+#define SUB_SYS_ALL_D0I1 0x01155555
+#define S0I3_WAKE_SOURCES 0x00001FFF
+
+#define PM_S0I3_COMMAND \
+ ((0 << 31) | /* Reserved */ \
+ (0 << 30) | /* Core must be idle */ \
+ (0xc2 << 22) | /* ACK C6 trigger */ \
+ (3 << 19) | /* Trigger on DMI message */ \
+ (3 << 16) | /* Enter S0i3 */ \
+ (0 << 13) | /* Numeric mode ID (sw) */ \
+ (3 << 9) | /* Trigger mode */ \
+ (0 << 8) | /* Do not interrupt */ \
+ (1 << 0)) /* Set configuration */
+
+#define LSS_DMI 0
+#define LSS_SD_HC0 1
+#define LSS_SD_HC1 2
+#define LSS_NAND 3
+#define LSS_IMAGING 4
+#define LSS_SECURITY 5
+#define LSS_DISPLAY 6
+#define LSS_USB_HC 7
+#define LSS_USB_OTG 8
+#define LSS_AUDIO 9
+#define LSS_AUDIO_LPE 9
+#define LSS_AUDIO_SSP 9
+#define LSS_I2C0 10
+#define LSS_I2C1 10
+#define LSS_I2C2 10
+#define LSS_KBD 10
+#define LSS_SPI0 10
+#define LSS_SPI1 10
+#define LSS_SPI2 10
+#define LSS_GPIO 10
+#define LSS_SRAM 11 /* used by SCU, do not touch */
+#define LSS_SD_HC2 12
+/* LSS hardware bits 15,14,13 are hardwired to 0, thus unusable */
+#define MRST_NUM_LSS 13
+
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#define SSMSK(mask, lss) ((mask) << ((lss) * 2))
+#define D0 0
+#define D0i1 1
+#define D0i2 2
+#define D0i3 3
+
+#define S0I3_SSS_TARGET ( \
+ SSMSK(D0i1, LSS_DMI) | \
+ SSMSK(D0i3, LSS_SD_HC0) | \
+ SSMSK(D0i3, LSS_SD_HC1) | \
+ SSMSK(D0i3, LSS_NAND) | \
+ SSMSK(D0i3, LSS_SD_HC2) | \
+ SSMSK(D0i3, LSS_IMAGING) | \
+ SSMSK(D0i3, LSS_SECURITY) | \
+ SSMSK(D0i3, LSS_DISPLAY) | \
+ SSMSK(D0i3, LSS_USB_HC) | \
+ SSMSK(D0i3, LSS_USB_OTG) | \
+ SSMSK(D0i3, LSS_AUDIO) | \
+ SSMSK(D0i1, LSS_I2C0))
+
+/*
+ * D0i1 on Langwell is Autonomous Clock Gating (ACG).
+ * Enable ACG on every LSS except camera and audio
+ */
+#define D0I1_ACG_SSS_TARGET \
+ (SUB_SYS_ALL_D0I1 & ~SSMSK(D0i1, LSS_IMAGING) & ~SSMSK(D0i1, LSS_AUDIO))
+
+enum cm_mode {
+ CM_NOP, /* ignore the config mode value */
+ CM_IMMEDIATE,
+ CM_DELAY,
+ CM_TRIGGER,
+ CM_INVALID
+};
+
+enum sys_state {
+ SYS_STATE_S0I0,
+ SYS_STATE_S0I1,
+ SYS_STATE_S0I2,
+ SYS_STATE_S0I3,
+ SYS_STATE_S3,
+ SYS_STATE_S5
+};
+
+#define SET_CFG_CMD 1
+
+enum int_status {
+ INT_SPURIOUS = 0,
+ INT_CMD_DONE = 1,
+ INT_CMD_ERR = 2,
+ INT_WAKE_RX = 3,
+ INT_SS_ERROR = 4,
+ INT_S0IX_MISS = 5,
+ INT_NO_ACKC6 = 6,
+ INT_INVALID = 7,
+};
+
+/* PMU register interface */
+static struct mrst_pmu_reg {
+ u32 pm_sts; /* 0x00 */
+ u32 pm_cmd; /* 0x04 */
+ u32 pm_ics; /* 0x08 */
+ u32 _resv1; /* 0x0C */
+ u32 pm_wkc[2]; /* 0x10 */
+ u32 pm_wks[2]; /* 0x18 */
+ u32 pm_ssc[4]; /* 0x20 */
+ u32 pm_sss[4]; /* 0x30 */
+ u32 pm_wssc[4]; /* 0x40 */
+ u32 pm_c3c4; /* 0x50 */
+ u32 pm_c5c6; /* 0x54 */
+ u32 pm_msi_disable; /* 0x58 */
+} *pmu_reg;
+
+static inline u32 pmu_read_sts(void) { return readl(&pmu_reg->pm_sts); }
+static inline u32 pmu_read_ics(void) { return readl(&pmu_reg->pm_ics); }
+static inline u32 pmu_read_wks(void) { return readl(&pmu_reg->pm_wks[0]); }
+static inline u32 pmu_read_sss(void) { return readl(&pmu_reg->pm_sss[0]); }
+
+static inline void pmu_write_cmd(u32 arg) { writel(arg, &pmu_reg->pm_cmd); }
+static inline void pmu_write_ics(u32 arg) { writel(arg, &pmu_reg->pm_ics); }
+static inline void pmu_write_wkc(u32 arg) { writel(arg, &pmu_reg->pm_wkc[0]); }
+static inline void pmu_write_ssc(u32 arg) { writel(arg, &pmu_reg->pm_ssc[0]); }
+static inline void pmu_write_wssc(u32 arg)
+ { writel(arg, &pmu_reg->pm_wssc[0]); }
+
+static inline void pmu_msi_enable(void) { writel(0, &pmu_reg->pm_msi_disable); }
+static inline u32 pmu_msi_is_disabled(void)
+ { return readl(&pmu_reg->pm_msi_disable); }
+
+union pmu_pm_ics {
+ struct {
+ u32 cause:8;
+ u32 enable:1;
+ u32 pending:1;
+ u32 reserved:22;
+ } bits;
+ u32 value;
+};
+
+static inline void pmu_irq_enable(void)
+{
+ union pmu_pm_ics pmu_ics;
+
+ pmu_ics.value = pmu_read_ics();
+ pmu_ics.bits.enable = 1;
+ pmu_write_ics(pmu_ics.value);
+}
+
+union pmu_pm_status {
+ struct {
+ u32 pmu_rev:8;
+ u32 pmu_busy:1;
+ u32 mode_id:4;
+ u32 Reserved:19;
+ } pmu_status_parts;
+ u32 pmu_status_value;
+};
+
+static inline int pmu_read_busy_status(void)
+{
+ union pmu_pm_status result;
+
+ result.pmu_status_value = pmu_read_sts();
+
+ return result.pmu_status_parts.pmu_busy;
+}
+
+/* pmu set config parameters */
+struct cfg_delay_param_t {
+ u32 cmd:8;
+ u32 ioc:1;
+ u32 cfg_mode:4;
+ u32 mode_id:3;
+ u32 sys_state:3;
+ u32 cfg_delay:8;
+ u32 rsvd:5;
+};
+
+struct cfg_trig_param_t {
+ u32 cmd:8;
+ u32 ioc:1;
+ u32 cfg_mode:4;
+ u32 mode_id:3;
+ u32 sys_state:3;
+ u32 cfg_trig_type:3;
+ u32 cfg_trig_val:8;
+ u32 cmbi:1;
+ u32 rsvd1:1;
+};
+
+union pmu_pm_set_cfg_cmd_t {
+ union {
+ struct cfg_delay_param_t d_param;
+ struct cfg_trig_param_t t_param;
+ } pmu2_params;
+ u32 pmu_pm_set_cfg_cmd_value;
+};
+
+#ifdef FUTURE_PATCH
+extern int mrst_s0i3_entry(u32 regval, u32 *regaddr);
+#else
+static inline int mrst_s0i3_entry(u32 regval, u32 *regaddr) { return -1; }
+#endif
+#endif
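A stand-alone illustration of the PM_SSS encoding defined above: each LSS owns a 2-bit field, SSMSK() shifts a D0ix value into that field, and an update clears the field before OR-ing in the new state, mirroring the old_sss/new_sss logic in pmu.c:

/*
 * Demonstration of the 2-bits-per-LSS sub-system state encoding,
 * using the SSMSK() macro and D0ix values from pmu.h above.
 */
#include <stdint.h>
#include <stdio.h>

#define SSMSK(mask, lss) ((mask) << ((lss) * 2))
#define D0   0
#define D0i1 1
#define D0i2 2
#define D0i3 3

int main(void)
{
	uint32_t sss = 0;
	int lss = 6;                          /* LSS_DISPLAY in the table above */

	sss |= SSMSK(D0i3, lss);              /* display fully gated */
	printf("sss=0x%08x\n", sss);

	/* bring the display LSS back to D0i1: clear the field, set new state */
	sss = (sss & ~SSMSK(3, lss)) | SSMSK(D0i1, lss);
	printf("sss=0x%08x, LSS%d state=%u\n",
	       sss, lss, (sss >> (lss * 2)) & 0x3);
	return 0;
}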
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 45e94aca5bc..3326204e251 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -15,7 +15,7 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
grant-table.o suspend.o platform-pci-unplug.o \
p2m.o
-obj-$(CONFIG_FUNCTION_TRACER) += trace.o
+obj-$(CONFIG_FTRACE) += trace.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 60aeeb56948..df118a825f3 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
+#include <linux/cpuidle.h>
#include <asm/elf.h>
#include <asm/vdso.h>
@@ -92,8 +93,6 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
if (end <= start)
return 0;
- printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ",
- start, end);
for(pfn = start; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -106,14 +105,14 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
&reservation);
- WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
- start, end, ret);
+ WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
if (ret == 1) {
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
len++;
}
}
- printk(KERN_CONT "%ld pages freed\n", len);
+ printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
+ start, end, len);
return len;
}
@@ -139,7 +138,7 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
if (last_end < max_addr)
released += xen_release_chunk(last_end, max_addr);
- printk(KERN_INFO "released %ld pages of unused memory\n", released);
+ printk(KERN_INFO "released %lu pages of unused memory\n", released);
return released;
}
@@ -426,7 +425,7 @@ void __init xen_arch_setup(void)
#ifdef CONFIG_X86_32
boot_cpu_data.hlt_works_ok = 1;
#endif
- pm_idle = default_idle;
+ disable_cpuidle();
boot_option_idle_override = IDLE_HALT;
fiddle_vdso();
diff --git a/arch/x86/xen/trace.c b/arch/x86/xen/trace.c
index 734beba2a08..520022d1a18 100644
--- a/arch/x86/xen/trace.c
+++ b/arch/x86/xen/trace.c
@@ -1,4 +1,5 @@
#include <linux/ftrace.h>
+#include <xen/interface/xen.h>
#define N(x) [__HYPERVISOR_##x] = "("#x")"
static const char *xen_hypercall_names[] = {
diff --git a/block/blk-core.c b/block/blk-core.c
index b850bedad22..b627558c461 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1368,8 +1368,10 @@ static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
static int __init fail_make_request_debugfs(void)
{
- return init_fault_attr_dentries(&fail_make_request,
- "fail_make_request");
+ struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
+ NULL, &fail_make_request);
+
+ return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}
late_initcall(fail_make_request_debugfs);
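Both block-layer hunks convert the dentry (or encoded error) returned by fault_create_debugfs_attr() into an errno with IS_ERR()/PTR_ERR(). A minimal user-space model of that pointer-encoded-error idiom, using simplified re-implementations rather than the kernel's macros:

/*
 * Minimal model of the ERR_PTR/IS_ERR/PTR_ERR idiom the converted
 * initcalls rely on.  These are simplified re-implementations for
 * illustration, not the kernel's definitions.
 */
#include <stdio.h>

#define MAX_ERRNO 4095
#define ENOMEM    12

static void *ERR_PTR(long err)       { return (void *)err; }
static long  PTR_ERR(const void *p)  { return (long)p; }
static int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *create_attr(int fail)   /* stand-in for the debugfs helper */
{
	static int dummy;
	return fail ? ERR_PTR(-ENOMEM) : &dummy;
}

int main(void)
{
	void *dir = create_attr(1);
	int ret = IS_ERR(dir) ? (int)PTR_ERR(dir) : 0;

	printf("init would return %d\n", ret);   /* -12 on failure, 0 on success */
	return 0;
}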
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 4f0c06c7a33..78035488895 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -28,7 +28,10 @@ int blk_should_fake_timeout(struct request_queue *q)
static int __init fail_io_timeout_debugfs(void)
{
- return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
+ struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
+ NULL, &fail_io_timeout);
+
+ return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}
late_initcall(fail_io_timeout_debugfs);
diff --git a/crypto/md5.c b/crypto/md5.c
index 30efc7dad89..7febeaab923 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -21,99 +21,9 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/cryptohash.h>
#include <asm/byteorder.h>
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-#define MD5STEP(f, w, x, y, z, in, s) \
- (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
-
-static void md5_transform(u32 *hash, u32 const *in)
-{
- u32 a, b, c, d;
-
- a = hash[0];
- b = hash[1];
- c = hash[2];
- d = hash[3];
-
- MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
- MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
- MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
- MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
- MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
- MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
- MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
- MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
- MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
- MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
- MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
- MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
- MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
- MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
- MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
- MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
- MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
- MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
- MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
- MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
- MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
- MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
- MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
- MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
- MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
- MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
- MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
- MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
- MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
- MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
- MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
- MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
- MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
- MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
- MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
- MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
- MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
- MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
- MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
- MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
- MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
- MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
- MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
- MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
- MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
- MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
- MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
- MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
- MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
- MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
- MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
- MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
- MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
- MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
- MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
- MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
- MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
- MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
- MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
- MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
- MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
- MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
- MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
- MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
- hash[0] += a;
- hash[1] += b;
- hash[2] += c;
- hash[3] += d;
-}
-
/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 73863d86f02..76dc02f1557 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -126,6 +126,12 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
+/*
+ * Disable runtime checking and repair of values returned by control methods.
+ * Use only if the repair is causing a problem on a particular machine.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
+
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index c7f743ca395..5552125d834 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -357,6 +357,7 @@ struct acpi_predefined_data {
char *pathname;
const union acpi_predefined_info *predefined;
union acpi_operand_object *parent_package;
+ struct acpi_namespace_node *node;
u32 flags;
u8 node_flags;
};
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 94e73c97cf8..c445cca490e 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -468,6 +468,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
{{"_TC2", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TDL", 0, ACPI_RTYPE_INTEGER}},
{{"_TIP", 1, ACPI_RTYPE_INTEGER}},
{{"_TIV", 1, ACPI_RTYPE_INTEGER}},
{{"_TMP", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 9fb03fa8ffd..c845c8089f3 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -193,14 +193,20 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
}
/*
- * 1) We have a return value, but if one wasn't expected, just exit, this is
- * not a problem. For example, if the "Implicit Return" feature is
- * enabled, methods will always return a value.
+ * Return value validation and possible repair.
*
- * 2) If the return value can be of any type, then we cannot perform any
- * validation, exit.
+ * 1) Don't perform return value validation/repair if this feature
+ * has been disabled via a global option.
+ *
+ * 2) We have a return value, but if one wasn't expected, just exit,
+ * this is not a problem. For example, if the "Implicit Return"
+ * feature is enabled, methods will always return a value.
+ *
+ * 3) If the return value can be of any type, then we cannot perform
+ * any validation, just exit.
*/
- if ((!predefined->info.expected_btypes) ||
+ if (acpi_gbl_disable_auto_repair ||
+ (!predefined->info.expected_btypes) ||
(predefined->info.expected_btypes == ACPI_RTYPE_ALL)) {
goto cleanup;
}
@@ -212,6 +218,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
goto cleanup;
}
data->predefined = predefined;
+ data->node = node;
data->node_flags = node->flags;
data->pathname = pathname;
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 973883babee..024c4f263f8 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -503,6 +503,21 @@ acpi_ns_repair_TSS(struct acpi_predefined_data *data,
{
union acpi_operand_object *return_object = *return_object_ptr;
acpi_status status;
+ struct acpi_namespace_node *node;
+
+ /*
+ * We can only sort the _TSS return package if there is no _PSS in the
+ * same scope. This is because if _PSS is present, the ACPI specification
+ * dictates that the _TSS Power Dissipation field is to be ignored, and
+ * therefore some BIOSes leave garbage values in the _TSS Power field(s).
+ * In this case, it is best to just return the _TSS package as-is.
+ * (May, 2011)
+ */
+ status =
+ acpi_ns_get_node(data->node, "^_PSS", ACPI_NS_NO_UPSEARCH, &node);
+ if (ACPI_SUCCESS(status)) {
+ return (AE_OK);
+ }
status = acpi_ns_check_sorted_list(data, return_object, 5, 1,
ACPI_SORT_DESCENDING,
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 48db0944ce4..62365f6075d 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -126,12 +126,29 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
}
/*
- * Originally, we checked the table signature for "SSDT" or "PSDT" here.
- * Next, we added support for OEMx tables, signature "OEM".
- * Valid tables were encountered with a null signature, so we've just
- * given up on validating the signature, since it seems to be a waste
- * of code. The original code was removed (05/2008).
+ * Validate the incoming table signature.
+ *
+ * 1) Originally, we checked the table signature for "SSDT" or "PSDT".
+ * 2) We added support for OEMx tables, signature "OEM".
+ * 3) Valid tables were encountered with a null signature, so we just
+ * gave up on validating the signature, (05/2008).
+ * 4) We encountered non-AML tables such as the MADT, which caused
+ * interpreter errors and kernel faults. So now, we once again allow
+ * only "SSDT", "OEMx", and now, also a null signature. (05/2011).
*/
+ if ((table_desc->pointer->signature[0] != 0x00) &&
+ (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT))
+ && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) {
+ ACPI_ERROR((AE_INFO,
+ "Table has invalid signature [%4.4s] (0x%8.8X), must be SSDT or OEMx",
+ acpi_ut_valid_acpi_name(*(u32 *)table_desc->
+ pointer->
+ signature) ? table_desc->
+ pointer->signature : "????",
+ *(u32 *)table_desc->pointer->signature));
+
+ return_ACPI_STATUS(AE_BAD_SIGNATURE);
+ }
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
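The new check accepts only a null leading signature byte, "SSDT", or an "OEM"-prefixed signature, so a stray non-AML table such as the MADT is rejected before it reaches the interpreter. A stand-alone restatement of that predicate, with plain libc calls in place of the ACPICA macros:

/*
 * Stand-alone restatement of the new table-signature filter: accept a
 * null signature, "SSDT", or an "OEM"-prefixed signature; reject the rest.
 */
#include <stdio.h>
#include <string.h>

static int signature_ok(const char sig[4])
{
	if (sig[0] == 0x00)
		return 1;
	if (memcmp(sig, "SSDT", 4) == 0)
		return 1;
	if (strncmp(sig, "OEM", 3) == 0)
		return 1;
	return 0;
}

int main(void)
{
	const char *tests[] = { "SSDT", "OEM1", "APIC", "\0\0\0\0" };
	int i;

	for (i = 0; i < 4; i++)
		printf("%-4.4s -> %s\n", tests[i],
		       signature_ok(tests[i]) ? "load" : "reject (AE_BAD_SIGNATURE)");
	return 0;
}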
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index f739a70b1c7..c34aa51af4e 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -10,9 +10,11 @@ config ACPI_APEI
error injection.
config ACPI_APEI_GHES
- tristate "APEI Generic Hardware Error Source"
+ bool "APEI Generic Hardware Error Source"
depends on ACPI_APEI && X86
select ACPI_HED
+ select LLIST
+ select GENERIC_ALLOCATOR
help
Generic Hardware Error Source provides a way to report
platform hardware errors (such as that from chipset). It
@@ -30,6 +32,13 @@ config ACPI_APEI_PCIEAER
PCIe AER errors may be reported via APEI firmware first mode.
Turn on this option to enable the corresponding support.
+config ACPI_APEI_MEMORY_FAILURE
+ bool "APEI memory error recovering support"
+ depends on ACPI_APEI && MEMORY_FAILURE
+ help
+ Memory errors may be reported via APEI firmware first mode.
+ Turn on this option to enable the memory recovering support.
+
config ACPI_APEI_EINJ
tristate "APEI Error INJection (EINJ)"
depends on ACPI_APEI && DEBUG_FS
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 4a904a4bf05..8041248fce9 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -157,9 +157,10 @@ EXPORT_SYMBOL_GPL(apei_exec_noop);
* Interpret the specified action. Go through the whole action table and
* execute all instructions belonging to the action.
*/
-int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
+ bool optional)
{
- int rc;
+ int rc = -ENOENT;
u32 i, ip;
struct acpi_whea_header *entry;
apei_exec_ins_func_t run;
@@ -198,9 +199,9 @@ rewind:
goto rewind;
}
- return 0;
+ return !optional && rc < 0 ? rc : 0;
}
-EXPORT_SYMBOL_GPL(apei_exec_run);
+EXPORT_SYMBOL_GPL(__apei_exec_run);
typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
@@ -603,3 +604,29 @@ struct dentry *apei_get_debugfs_dir(void)
return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
+
+int apei_osc_setup(void)
+{
+ static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
+ acpi_handle handle;
+ u32 capbuf[3];
+ struct acpi_osc_context context = {
+ .uuid_str = whea_uuid_str,
+ .rev = 1,
+ .cap.length = sizeof(capbuf),
+ .cap.pointer = capbuf,
+ };
+
+ capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_TYPE] = 0;
+ capbuf[OSC_CONTROL_TYPE] = 0;
+
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
+ || ACPI_FAILURE(acpi_run_osc(handle, &context)))
+ return -EIO;
+ else {
+ kfree(context.ret.pointer);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(apei_osc_setup);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index ef0581f2094..f57050e7a5e 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -50,7 +50,18 @@ static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
return ctx->value;
}
-int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+int __apei_exec_run(struct apei_exec_context *ctx, u8 action, bool optional);
+
+static inline int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+ return __apei_exec_run(ctx, action, 0);
+}
+
+/* It is optional whether the firmware provides the action */
+static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 action)
+{
+ return __apei_exec_run(ctx, action, 1);
+}
/* Common instruction implementation */
@@ -113,4 +124,6 @@ void apei_estatus_print(const char *pfx,
const struct acpi_hest_generic_status *estatus);
int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+
+int apei_osc_setup(void);
#endif
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index f74b2ea11f2..589b96c3870 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -46,7 +46,8 @@
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
* most will ignore the parameter and make their own choice of address
- * for error injection.
+ * for error injection. This extension is used only if the
+ * param_extension module parameter is specified.
*/
struct einj_parameter {
u64 type;
@@ -65,6 +66,9 @@ struct einj_parameter {
((struct acpi_whea_header *)((char *)(tab) + \
sizeof(struct acpi_table_einj)))
+static bool param_extension;
+module_param(param_extension, bool, 0);
+
static struct acpi_table_einj *einj_tab;
static struct apei_resources einj_resources;
@@ -285,7 +289,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
einj_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+ rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, type);
@@ -323,7 +327,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
rc = __einj_error_trigger(trigger_paddr);
if (rc)
return rc;
- rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+ rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
return rc;
}
@@ -489,14 +493,6 @@ static int __init einj_init(void)
einj_debug_dir, NULL, &error_type_fops);
if (!fentry)
goto err_cleanup;
- fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param1);
- if (!fentry)
- goto err_cleanup;
- fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param2);
- if (!fentry)
- goto err_cleanup;
fentry = debugfs_create_file("error_inject", S_IWUSR,
einj_debug_dir, NULL, &error_inject_fops);
if (!fentry)
@@ -513,12 +509,23 @@ static int __init einj_init(void)
rc = apei_exec_pre_map_gars(&ctx);
if (rc)
goto err_release;
- param_paddr = einj_get_parameter_address();
- if (param_paddr) {
- einj_param = ioremap(param_paddr, sizeof(*einj_param));
- rc = -ENOMEM;
- if (!einj_param)
- goto err_unmap;
+ if (param_extension) {
+ param_paddr = einj_get_parameter_address();
+ if (param_paddr) {
+ einj_param = ioremap(param_paddr, sizeof(*einj_param));
+ rc = -ENOMEM;
+ if (!einj_param)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_unmap;
+ } else
+ pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
}
pr_info(EINJ_PFX "Error INJection is initialized.\n");
@@ -526,6 +533,8 @@ static int __init einj_init(void)
return 0;
err_unmap:
+ if (einj_param)
+ iounmap(einj_param);
apei_exec_post_unmap_gars(&ctx);
err_release:
apei_resources_release(&einj_resources);
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index a4cfb64c86a..903549df809 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -33,7 +33,7 @@
#define ERST_DBG_PFX "ERST DBG: "
-#define ERST_DBG_RECORD_LEN_MAX 4096
+#define ERST_DBG_RECORD_LEN_MAX 0x4000
static void *erst_dbg_buf;
static unsigned int erst_dbg_buf_len;
@@ -213,6 +213,10 @@ static struct miscdevice erst_dbg_dev = {
static __init int erst_dbg_init(void)
{
+ if (erst_disable) {
+ pr_info(ERST_DBG_PFX "ERST support is disabled.\n");
+ return -ENODEV;
+ }
return misc_register(&erst_dbg_dev);
}
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index e6cef8e1b53..2ca59dc69f7 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -642,7 +642,7 @@ static int __erst_write_to_storage(u64 offset)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, offset);
@@ -666,7 +666,7 @@ static int __erst_write_to_storage(u64 offset)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -681,7 +681,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, offset);
@@ -709,7 +709,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -724,7 +724,7 @@ static int __erst_clear_from_storage(u64 record_id)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, record_id);
@@ -748,7 +748,7 @@ static int __erst_clear_from_storage(u64 record_id)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -932,8 +932,11 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time);
-static u64 erst_writer(enum pstore_type_id type, size_t size);
+ struct timespec *time, struct pstore_info *psi);
+static u64 erst_writer(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi);
+static int erst_clearer(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi);
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
@@ -942,7 +945,7 @@ static struct pstore_info erst_info = {
.close = erst_close_pstore,
.read = erst_reader,
.write = erst_writer,
- .erase = erst_clear
+ .erase = erst_clearer
};
#define CPER_CREATOR_PSTORE \
@@ -983,7 +986,7 @@ static int erst_close_pstore(struct pstore_info *psi)
}
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time)
+ struct timespec *time, struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
@@ -1037,7 +1040,8 @@ out:
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
-static u64 erst_writer(enum pstore_type_id type, size_t size)
+static u64 erst_writer(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi)
{
struct cper_pstore_record *rcd = (struct cper_pstore_record *)
(erst_info.buf - sizeof(*rcd));
@@ -1080,6 +1084,12 @@ static u64 erst_writer(enum pstore_type_id type, size_t size)
return rcd->hdr.record_id;
}
+static int erst_clearer(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi)
+{
+ return erst_clear(id);
+}
+
static int __init erst_init(void)
{
int rc = 0;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f703b288115..0784f99a466 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -12,7 +12,7 @@
* For more information about Generic Hardware Error Source, please
* refer to ACPI Specification version 4.0, section 17.3.2.6
*
- * Copyright 2010 Intel Corp.
+ * Copyright 2010,2011 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
@@ -42,6 +42,9 @@
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
+#include <linux/irq_work.h>
+#include <linux/llist.h>
+#include <linux/genalloc.h>
#include <acpi/apei.h>
#include <acpi/atomicio.h>
#include <acpi/hed.h>
@@ -53,6 +56,30 @@
#define GHES_PFX "GHES: "
#define GHES_ESTATUS_MAX_SIZE 65536
+#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536
+
+#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
+
+/* This is just an estimate for memory pool allocation */
+#define GHES_ESTATUS_CACHE_AVG_SIZE 512
+
+#define GHES_ESTATUS_CACHES_SIZE 4
+
+#define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL
+/* Prevent too many caches from being allocated because of RCU */
+#define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2)
+
+#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
+ (sizeof(struct ghes_estatus_cache) + (estatus_len))
+#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
+ ((struct acpi_hest_generic_status *) \
+ ((struct ghes_estatus_cache *)(estatus_cache) + 1))
+
+#define GHES_ESTATUS_NODE_LEN(estatus_len) \
+ (sizeof(struct ghes_estatus_node) + (estatus_len))
+#define GHES_ESTATUS_FROM_NODE(estatus_node) \
+ ((struct acpi_hest_generic_status *) \
+ ((struct ghes_estatus_node *)(estatus_node) + 1))
/*
* One struct ghes is created for each generic hardware error source.
@@ -77,6 +104,22 @@ struct ghes {
};
};
+struct ghes_estatus_node {
+ struct llist_node llnode;
+ struct acpi_hest_generic *generic;
+};
+
+struct ghes_estatus_cache {
+ u32 estatus_len;
+ atomic_t count;
+ struct acpi_hest_generic *generic;
+ unsigned long long time_in;
+ struct rcu_head rcu;
+};
+
+int ghes_disable;
+module_param_named(disable, ghes_disable, bool, 0);
+
static int ghes_panic_timeout __read_mostly = 30;
/*
@@ -121,6 +164,22 @@ static struct vm_struct *ghes_ioremap_area;
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+/*
+ * printk is not safe in NMI context.  So in the NMI handler, we
+ * allocate the required memory from the lock-less memory allocator
+ * (ghes_estatus_pool), save the estatus into it, put it on a lock-less
+ * list (ghes_estatus_llist), then delay the printk into IRQ context
+ * via irq_work (ghes_proc_irq_work).  ghes_estatus_pool_size_request
+ * records the pool size required by all NMI error sources.
+ */
+static struct gen_pool *ghes_estatus_pool;
+static unsigned long ghes_estatus_pool_size_request;
+static struct llist_head ghes_estatus_llist;
+static struct irq_work ghes_proc_irq_work;
+
+struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
+static atomic_t ghes_estatus_cache_alloced;
+
static int ghes_ioremap_init(void)
{
ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
@@ -180,6 +239,55 @@ static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
__flush_tlb_one(vaddr);
}
+static int ghes_estatus_pool_init(void)
+{
+ ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
+ if (!ghes_estatus_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk,
+ void *data)
+{
+ free_page(chunk->start_addr);
+}
+
+static void ghes_estatus_pool_exit(void)
+{
+ gen_pool_for_each_chunk(ghes_estatus_pool,
+ ghes_estatus_pool_free_chunk_page, NULL);
+ gen_pool_destroy(ghes_estatus_pool);
+}
+
+static int ghes_estatus_pool_expand(unsigned long len)
+{
+ unsigned long i, pages, size, addr;
+ int ret;
+
+ ghes_estatus_pool_size_request += PAGE_ALIGN(len);
+ size = gen_pool_size(ghes_estatus_pool);
+ if (size >= ghes_estatus_pool_size_request)
+ return 0;
+ pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
+ for (i = 0; i < pages; i++) {
+ addr = __get_free_page(GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ghes_estatus_pool_shrink(unsigned long len)
+{
+ ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
+}
+
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
struct ghes *ghes;
@@ -341,43 +449,196 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_do_proc(struct ghes *ghes)
+static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
{
- int sev, processed = 0;
+ int sev, sec_sev;
struct acpi_hest_generic_data *gdata;
- sev = ghes_severity(ghes->estatus->error_severity);
- apei_estatus_for_each_section(ghes->estatus, gdata) {
-#ifdef CONFIG_X86_MCE
+ sev = ghes_severity(estatus->error_severity);
+ apei_estatus_for_each_section(estatus, gdata) {
+ sec_sev = ghes_severity(gdata->error_severity);
if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
CPER_SEC_PLATFORM_MEM)) {
- apei_mce_report_mem_error(
- sev == GHES_SEV_CORRECTED,
- (struct cper_sec_mem_err *)(gdata+1));
- processed = 1;
- }
+ struct cper_sec_mem_err *mem_err;
+ mem_err = (struct cper_sec_mem_err *)(gdata+1);
+#ifdef CONFIG_X86_MCE
+ apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
+ mem_err);
#endif
+#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
+ if (sev == GHES_SEV_RECOVERABLE &&
+ sec_sev == GHES_SEV_RECOVERABLE &&
+ mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ unsigned long pfn;
+ pfn = mem_err->physical_addr >> PAGE_SHIFT;
+ memory_failure_queue(pfn, 0, 0);
+ }
+#endif
+ }
}
}
-static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
+static void __ghes_print_estatus(const char *pfx,
+ const struct acpi_hest_generic *generic,
+ const struct acpi_hest_generic_status *estatus)
{
- /* Not more than 2 messages every 5 seconds */
- static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
-
if (pfx == NULL) {
- if (ghes_severity(ghes->estatus->error_severity) <=
+ if (ghes_severity(estatus->error_severity) <=
GHES_SEV_CORRECTED)
pfx = KERN_WARNING HW_ERR;
else
pfx = KERN_ERR HW_ERR;
}
- if (__ratelimit(&ratelimit)) {
- printk(
- "%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
- pfx, ghes->generic->header.source_id);
- apei_estatus_print(pfx, ghes->estatus);
+ printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+ pfx, generic->header.source_id);
+ apei_estatus_print(pfx, estatus);
+}
+
+static int ghes_print_estatus(const char *pfx,
+ const struct acpi_hest_generic *generic,
+ const struct acpi_hest_generic_status *estatus)
+{
+ /* Not more than 2 messages every 5 seconds */
+ static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
+ static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
+ struct ratelimit_state *ratelimit;
+
+ if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
+ ratelimit = &ratelimit_corrected;
+ else
+ ratelimit = &ratelimit_uncorrected;
+ if (__ratelimit(ratelimit)) {
+ __ghes_print_estatus(pfx, generic, estatus);
+ return 1;
}
+ return 0;
+}
+
+/*
+ * GHES error status reporting throttle, to report more kinds of
+ * errors instead of just the most frequently occurring ones.
+ */
+static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
+{
+ u32 len;
+ int i, cached = 0;
+ unsigned long long now;
+ struct ghes_estatus_cache *cache;
+ struct acpi_hest_generic_status *cache_estatus;
+
+ len = apei_estatus_len(estatus);
+ rcu_read_lock();
+ for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
+ cache = rcu_dereference(ghes_estatus_caches[i]);
+ if (cache == NULL)
+ continue;
+ if (len != cache->estatus_len)
+ continue;
+ cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+ if (memcmp(estatus, cache_estatus, len))
+ continue;
+ atomic_inc(&cache->count);
+ now = sched_clock();
+ if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
+ cached = 1;
+ break;
+ }
+ rcu_read_unlock();
+ return cached;
+}
+
+static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
+ struct acpi_hest_generic *generic,
+ struct acpi_hest_generic_status *estatus)
+{
+ int alloced;
+ u32 len, cache_len;
+ struct ghes_estatus_cache *cache;
+ struct acpi_hest_generic_status *cache_estatus;
+
+ alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
+ if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
+ atomic_dec(&ghes_estatus_cache_alloced);
+ return NULL;
+ }
+ len = apei_estatus_len(estatus);
+ cache_len = GHES_ESTATUS_CACHE_LEN(len);
+ cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
+ if (!cache) {
+ atomic_dec(&ghes_estatus_cache_alloced);
+ return NULL;
+ }
+ cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+ memcpy(cache_estatus, estatus, len);
+ cache->estatus_len = len;
+ atomic_set(&cache->count, 0);
+ cache->generic = generic;
+ cache->time_in = sched_clock();
+ return cache;
+}
+
+static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
+{
+ u32 len;
+
+ len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
+ len = GHES_ESTATUS_CACHE_LEN(len);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
+ atomic_dec(&ghes_estatus_cache_alloced);
+}
+
+static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
+{
+ struct ghes_estatus_cache *cache;
+
+ cache = container_of(head, struct ghes_estatus_cache, rcu);
+ ghes_estatus_cache_free(cache);
+}
+
+static void ghes_estatus_cache_add(
+ struct acpi_hest_generic *generic,
+ struct acpi_hest_generic_status *estatus)
+{
+ int i, slot = -1, count;
+ unsigned long long now, duration, period, max_period = 0;
+ struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
+
+ new_cache = ghes_estatus_cache_alloc(generic, estatus);
+ if (new_cache == NULL)
+ return;
+ rcu_read_lock();
+ now = sched_clock();
+ for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
+ cache = rcu_dereference(ghes_estatus_caches[i]);
+ if (cache == NULL) {
+ slot = i;
+ slot_cache = NULL;
+ break;
+ }
+ duration = now - cache->time_in;
+ if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
+ slot = i;
+ slot_cache = cache;
+ break;
+ }
+ count = atomic_read(&cache->count);
+ period = duration;
+ do_div(period, (count + 1));
+ if (period > max_period) {
+ max_period = period;
+ slot = i;
+ slot_cache = cache;
+ }
+ }
+ /* new_cache must be put into the array after its contents are written */
+ smp_wmb();
+ if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
+ slot_cache, new_cache) == slot_cache) {
+ if (slot_cache)
+ call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
+ } else
+ ghes_estatus_cache_free(new_cache);
+ rcu_read_unlock();
}
static int ghes_proc(struct ghes *ghes)
@@ -387,9 +648,11 @@ static int ghes_proc(struct ghes *ghes)
rc = ghes_read_estatus(ghes, 0);
if (rc)
goto out;
- ghes_print_estatus(NULL, ghes);
- ghes_do_proc(ghes);
-
+ if (!ghes_estatus_cached(ghes->estatus)) {
+ if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
+ ghes_estatus_cache_add(ghes->generic, ghes->estatus);
+ }
+ ghes_do_proc(ghes->estatus);
out:
ghes_clear_estatus(ghes);
return 0;
@@ -447,6 +710,45 @@ static int ghes_notify_sci(struct notifier_block *this,
return ret;
}
+static void ghes_proc_in_irq(struct irq_work *irq_work)
+{
+ struct llist_node *llnode, *next, *tail = NULL;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ u32 len, node_len;
+
+ /*
+ * Because the time order of the estatus entries in the list is
+ * reversed, restore the proper order here.
+ */
+ llnode = llist_del_all(&ghes_estatus_llist);
+ while (llnode) {
+ next = llnode->next;
+ llnode->next = tail;
+ tail = llnode;
+ llnode = next;
+ }
+ llnode = tail;
+ while (llnode) {
+ next = llnode->next;
+ estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+ llnode);
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ len = apei_estatus_len(estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ ghes_do_proc(estatus);
+ if (!ghes_estatus_cached(estatus)) {
+ generic = estatus_node->generic;
+ if (ghes_print_estatus(NULL, generic, estatus))
+ ghes_estatus_cache_add(generic, estatus);
+ }
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+ node_len);
+ llnode = next;
+ }
+}
+
static int ghes_notify_nmi(struct notifier_block *this,
unsigned long cmd, void *data)
{
@@ -476,7 +778,8 @@ static int ghes_notify_nmi(struct notifier_block *this,
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
- ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
+ __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+ ghes_global->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
panic_timeout = ghes_panic_timeout;
@@ -484,12 +787,34 @@ static int ghes_notify_nmi(struct notifier_block *this,
}
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ u32 len, node_len;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic_status *estatus;
+#endif
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
- /* Do not print estatus because printk is not NMI safe */
- ghes_do_proc(ghes);
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ if (ghes_estatus_cached(ghes->estatus))
+ goto next;
+ /* Save estatus for further processing in IRQ context */
+ len = apei_estatus_len(ghes->estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
+ node_len);
+ if (estatus_node) {
+ estatus_node->generic = ghes->generic;
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ memcpy(estatus, ghes->estatus, len);
+ llist_add(&estatus_node->llnode, &ghes_estatus_llist);
+ }
+next:
+#endif
ghes_clear_estatus(ghes);
}
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ irq_work_queue(&ghes_proc_irq_work);
+#endif
out:
raw_spin_unlock(&ghes_nmi_lock);
@@ -504,10 +829,26 @@ static struct notifier_block ghes_notifier_nmi = {
.notifier_call = ghes_notify_nmi,
};
+static unsigned long ghes_esource_prealloc_size(
+ const struct acpi_hest_generic *generic)
+{
+ unsigned long block_length, prealloc_records, prealloc_size;
+
+ block_length = min_t(unsigned long, generic->error_block_length,
+ GHES_ESTATUS_MAX_SIZE);
+ prealloc_records = max_t(unsigned long,
+ generic->records_to_preallocate, 1);
+ prealloc_size = min_t(unsigned long, block_length * prealloc_records,
+ GHES_ESOURCE_PREALLOC_MAX_SIZE);
+
+ return prealloc_size;
+}
+
static int __devinit ghes_probe(struct platform_device *ghes_dev)
{
struct acpi_hest_generic *generic;
struct ghes *ghes = NULL;
+ unsigned long len;
int rc = -EINVAL;
generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
@@ -573,6 +914,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
mutex_unlock(&ghes_list_mutex);
break;
case ACPI_HEST_NOTIFY_NMI:
+ len = ghes_esource_prealloc_size(generic);
+ ghes_estatus_pool_expand(len);
mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_nmi))
register_die_notifier(&ghes_notifier_nmi);
@@ -597,6 +940,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
{
struct ghes *ghes;
struct acpi_hest_generic *generic;
+ unsigned long len;
ghes = platform_get_drvdata(ghes_dev);
generic = ghes->generic;
@@ -627,6 +971,8 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
* freed after NMI handler finishes.
*/
synchronize_rcu();
+ len = ghes_esource_prealloc_size(generic);
+ ghes_estatus_pool_shrink(len);
break;
default:
BUG();
@@ -662,15 +1008,43 @@ static int __init ghes_init(void)
return -EINVAL;
}
+ if (ghes_disable) {
+ pr_info(GHES_PFX "GHES is not enabled!\n");
+ return -EINVAL;
+ }
+
+ init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
+
rc = ghes_ioremap_init();
if (rc)
goto err;
- rc = platform_driver_register(&ghes_platform_driver);
+ rc = ghes_estatus_pool_init();
if (rc)
goto err_ioremap_exit;
+ rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
+ GHES_ESTATUS_CACHE_ALLOCED_MAX);
+ if (rc)
+ goto err_pool_exit;
+
+ rc = platform_driver_register(&ghes_platform_driver);
+ if (rc)
+ goto err_pool_exit;
+
+ rc = apei_osc_setup();
+ if (rc == 0 && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
+ else if (rc == 0 && !osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
+ else if (rc && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
+ else
+ pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
+
return 0;
+err_pool_exit:
+ ghes_estatus_pool_exit();
err_ioremap_exit:
ghes_ioremap_exit();
err:
@@ -680,6 +1054,7 @@ err:
static void __exit ghes_exit(void)
{
platform_driver_unregister(&ghes_platform_driver);
+ ghes_estatus_pool_exit();
ghes_ioremap_exit();
}
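The slot-eviction policy in ghes_estatus_cache_add() above is easy to miss in the diff: an empty slot wins, an expired slot wins next, and otherwise the victim is the cached record that fires least often (largest duration/(count+1)). Below is a stand-alone sketch of just that selection step, with invented types and numbers; the real code additionally publishes the new cache with cmpxchg() and frees the old one via RCU.

```c
#include <stdio.h>

#define NR_SLOTS   4
#define MAX_AGE_NS 10000000000ULL	/* 10 s, as in the patch */

/* Toy cache slot: illustrative only. */
struct toy_cache { unsigned long long time_in; unsigned count; int used; };

static int pick_victim(const struct toy_cache *c, unsigned long long now)
{
	int i, slot = -1;
	unsigned long long period, max_period = 0;

	for (i = 0; i < NR_SLOTS; i++) {
		if (!c[i].used)
			return i;			/* empty slot */
		if (now - c[i].time_in >= MAX_AGE_NS)
			return i;			/* expired slot */
		/* average time between hits; +1 avoids division by zero */
		period = (now - c[i].time_in) / (c[i].count + 1);
		if (period > max_period) {
			max_period = period;
			slot = i;			/* least-busy slot so far */
		}
	}
	return slot;
}

int main(void)
{
	struct toy_cache c[NR_SLOTS] = {
		{ 1000, 50, 1 }, { 1000, 5, 1 }, { 1000, 500, 1 }, { 1000, 2, 1 },
	};

	printf("victim slot: %d\n", pick_victim(c, 2000000000ULL));
	return 0;
}
```

With the invented numbers above, the sketch picks slot 3, the entry that is hit least often, which is the behaviour the throttle wants: rarely seen errors get cache slots, frequent ones keep being suppressed.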
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 181bc2f7bb7..05fee06f4d6 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -231,16 +231,17 @@ void __init acpi_hest_init(void)
goto err;
}
- rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
- if (rc)
- goto err;
-
- rc = hest_ghes_dev_register(ghes_count);
- if (!rc) {
- pr_info(HEST_PFX "Table parsing has been initialized.\n");
- return;
+ if (!ghes_disable) {
+ rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
+ if (rc)
+ goto err;
+ rc = hest_ghes_dev_register(ghes_count);
+ if (rc)
+ goto err;
}
+ pr_info(HEST_PFX "Table parsing has been initialized.\n");
+ return;
err:
hest_disable = 1;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 2c661353e8f..7711d94a040 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -55,6 +55,9 @@
#define ACPI_BATTERY_NOTIFY_INFO 0x81
#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
+/* Battery power unit: 0 means mW, 1 means mA */
+#define ACPI_BATTERY_POWER_UNIT_MA 1
+
#define _COMPONENT ACPI_BATTERY_COMPONENT
ACPI_MODULE_NAME("battery");
@@ -91,16 +94,12 @@ MODULE_DEVICE_TABLE(acpi, battery_device_ids);
enum {
ACPI_BATTERY_ALARM_PRESENT,
ACPI_BATTERY_XINFO_PRESENT,
- /* For buggy DSDTs that report negative 16-bit values for either
- * charging or discharging current and/or report 0 as 65536
- * due to bad math.
- */
- ACPI_BATTERY_QUIRK_SIGNED16_CURRENT,
ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
};
struct acpi_battery {
struct mutex lock;
+ struct mutex sysfs_lock;
struct power_supply bat;
struct acpi_device *device;
struct notifier_block pm_nb;
@@ -301,7 +300,8 @@ static enum power_supply_property energy_battery_props[] = {
#ifdef CONFIG_ACPI_PROCFS_POWER
inline char *acpi_battery_units(struct acpi_battery *battery)
{
- return (battery->power_unit)?"mA":"mW";
+ return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
+ "mA" : "mW";
}
#endif
@@ -461,9 +461,17 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->update_time = jiffies;
kfree(buffer.pointer);
- if (test_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags) &&
- battery->rate_now != -1)
+ /* For buggy DSDTs that report negative 16-bit values for either
+ * charging or discharging current and/or report 0 as 65536
+ * due to bad math.
+ */
+ if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA &&
+ battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
+ (s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
+ printk_once(KERN_WARNING FW_BUG "battery: (dis)charge rate"
+ " invalid.\n");
+ }
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
@@ -544,7 +552,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
{
int result;
- if (battery->power_unit) {
+ if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
battery->bat.properties = charge_battery_props;
battery->bat.num_properties =
ARRAY_SIZE(charge_battery_props);
@@ -566,18 +574,16 @@ static int sysfs_add_battery(struct acpi_battery *battery)
static void sysfs_remove_battery(struct acpi_battery *battery)
{
- if (!battery->bat.dev)
+ mutex_lock(&battery->sysfs_lock);
+ if (!battery->bat.dev) {
+ mutex_unlock(&battery->sysfs_lock);
return;
+ }
+
device_remove_file(battery->bat.dev, &alarm_attr);
power_supply_unregister(&battery->bat);
battery->bat.dev = NULL;
-}
-
-static void acpi_battery_quirks(struct acpi_battery *battery)
-{
- if (dmi_name_in_vendors("Acer") && battery->power_unit) {
- set_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags);
- }
+ mutex_unlock(&battery->sysfs_lock);
}
/*
@@ -592,7 +598,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
*
* Handle this correctly so that they won't break userspace.
*/
-static void acpi_battery_quirks2(struct acpi_battery *battery)
+static void acpi_battery_quirks(struct acpi_battery *battery)
{
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
return ;
@@ -623,13 +629,15 @@ static int acpi_battery_update(struct acpi_battery *battery)
result = acpi_battery_get_info(battery);
if (result)
return result;
- acpi_battery_quirks(battery);
acpi_battery_init_alarm(battery);
}
- if (!battery->bat.dev)
- sysfs_add_battery(battery);
+ if (!battery->bat.dev) {
+ result = sysfs_add_battery(battery);
+ if (result)
+ return result;
+ }
result = acpi_battery_get_state(battery);
- acpi_battery_quirks2(battery);
+ acpi_battery_quirks(battery);
return result;
}
@@ -863,7 +871,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
}, \
}
-static struct battery_file {
+static const struct battery_file {
struct file_operations ops;
mode_t mode;
const char *name;
@@ -948,9 +956,12 @@ static int battery_notify(struct notifier_block *nb,
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
switch (mode) {
+ case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
- sysfs_remove_battery(battery);
- sysfs_add_battery(battery);
+ if (battery->bat.dev) {
+ sysfs_remove_battery(battery);
+ sysfs_add_battery(battery);
+ }
break;
}
@@ -972,28 +983,38 @@ static int acpi_battery_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
mutex_init(&battery->lock);
+ mutex_init(&battery->sysfs_lock);
if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
"_BIX", &handle)))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
- acpi_battery_update(battery);
+ result = acpi_battery_update(battery);
+ if (result)
+ goto fail;
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
#endif
- if (!result) {
- printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
- ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
- device->status.battery_present ? "present" : "absent");
- } else {
+ if (result) {
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
- kfree(battery);
+ goto fail;
}
+ printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+ ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+ device->status.battery_present ? "present" : "absent");
+
battery->pm_nb.notifier_call = battery_notify;
register_pm_notifier(&battery->pm_nb);
return result;
+
+fail:
+ sysfs_remove_battery(battery);
+ mutex_destroy(&battery->lock);
+ mutex_destroy(&battery->sysfs_lock);
+ kfree(battery);
+ return result;
}
static int acpi_battery_remove(struct acpi_device *device, int type)
@@ -1009,6 +1030,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
#endif
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
+ mutex_destroy(&battery->sysfs_lock);
kfree(battery);
return 0;
}
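The battery quirk rework above replaces the old Acer-only DMI check with a test on the value itself: if the firmware reports the (dis)charge rate in mA and the low 16 bits look like a negative two's-complement number, the driver takes the absolute value. A tiny stand-alone illustration of that cast, with an invented firmware reading (not part of the patch):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Buggy firmware reports -10 mA as an unsigned 16-bit value. */
	uint32_t rate_now = 0xFFF6;		/* 65526 */

	if ((int16_t)rate_now < 0)		/* looks like a negative s16 */
		rate_now = abs((int16_t)rate_now);

	printf("normalized rate: %u mA\n", rate_now);	/* prints 10 */
	return 0;
}
```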
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d1e06c182cd..437ddbf0c49 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -39,6 +39,7 @@
#include <linux/pci.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include <acpi/apei.h>
#include <linux/dmi.h>
#include <linux/suspend.h>
@@ -519,6 +520,7 @@ out_kfree:
}
EXPORT_SYMBOL(acpi_run_osc);
+bool osc_sb_apei_support_acked;
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
static void acpi_bus_osc_support(void)
{
@@ -541,11 +543,19 @@ static void acpi_bus_osc_support(void)
#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
#endif
+
+ if (!ghes_disable)
+ capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return;
- if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
+ if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
+ u32 *capbuf_ret = context.ret.pointer;
+ if (context.ret.length > OSC_SUPPORT_TYPE)
+ osc_sb_apei_support_acked =
+ capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT;
kfree(context.ret.pointer);
- /* do we need to check the returned cap? Sounds no */
+ }
+ /* do we need to check the other returned capabilities? Probably not. */
}
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 1864ad3cf89..19a61136d84 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -77,7 +77,7 @@ struct dock_dependent_device {
struct list_head list;
struct list_head hotplug_list;
acpi_handle handle;
- struct acpi_dock_ops *ops;
+ const struct acpi_dock_ops *ops;
void *context;
};
@@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
* the dock driver after _DCK is executed.
*/
int
-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
void *context)
{
struct dock_dependent_device *dd;
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 05b44201a61..22f918bacd3 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -92,7 +92,7 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
return count;
}
-static struct file_operations acpi_ec_io_ops = {
+static const struct file_operations acpi_ec_io_ops = {
.owner = THIS_MODULE,
.open = acpi_ec_open_io,
.read = acpi_ec_read_io,
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 467479f07c1..0f0356ca1a9 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -110,7 +110,7 @@ fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
return result;
}
-static struct thermal_cooling_device_ops fan_cooling_ops = {
+static const struct thermal_cooling_device_ops fan_cooling_ops = {
.get_max_state = fan_get_max_state,
.get_cur_state = fan_get_cur_state,
.set_cur_state = fan_set_cur_state,
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 372f9b70f7f..fa32f584229 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -155,7 +155,7 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
if (!strcmp("Linux", interface)) {
- printk(KERN_NOTICE FW_BUG PREFIX
+ printk_once(KERN_NOTICE FW_BUG PREFIX
"BIOS _OSI(Linux) query %s%s\n",
osi_linux.enable ? "honored" : "ignored",
osi_linux.cmdline ? " via cmdline" :
@@ -237,8 +237,23 @@ void acpi_os_vprintf(const char *fmt, va_list args)
#endif
}
+#ifdef CONFIG_KEXEC
+static unsigned long acpi_rsdp;
+static int __init setup_acpi_rsdp(char *arg)
+{
+ acpi_rsdp = simple_strtoul(arg, NULL, 16);
+ return 0;
+}
+early_param("acpi_rsdp", setup_acpi_rsdp);
+#endif
+
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
+#ifdef CONFIG_KEXEC
+ if (acpi_rsdp)
+ return acpi_rsdp;
+#endif
+
if (efi_enabled) {
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
return efi.acpi20;
@@ -1083,7 +1098,13 @@ struct osi_setup_entry {
bool enable;
};
-static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX];
+static struct osi_setup_entry __initdata
+ osi_setup_entries[OSI_STRING_ENTRIES_MAX] = {
+ {"Module Device", true},
+ {"Processor Device", true},
+ {"3.0 _SCP Extensions", true},
+ {"Processor Aggregator Device", true},
+};
void __init acpi_osi_setup(char *str)
{
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index f907cfbfa13..7f9eba9a0b0 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -303,6 +303,61 @@ void acpi_pci_irq_del_prt(struct pci_bus *bus)
/* --------------------------------------------------------------------------
PCI Interrupt Routing Support
-------------------------------------------------------------------------- */
+#ifdef CONFIG_X86_IO_APIC
+extern int noioapicquirk;
+extern int noioapicreroute;
+
+static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
+{
+ struct pci_bus *bus_it;
+
+ for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) {
+ if (!bus_it->self)
+ return 0;
+ if (bus_it->self->irq_reroute_variant)
+ return bus_it->self->irq_reroute_variant;
+ }
+ return 0;
+}
+
+/*
+ * Some chipsets (e.g. Intel 6700PXH) generate a legacy INTx when the IRQ
+ * entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel does
+ * during interrupt handling). When this INTx generation cannot be disabled,
+ * we reroute these interrupts to their legacy equivalent to get rid of
+ * spurious interrupts.
+ */
+static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
+ struct acpi_prt_entry *entry)
+{
+ if (noioapicquirk || noioapicreroute) {
+ return 0;
+ } else {
+ switch (bridge_has_boot_interrupt_variant(dev->bus)) {
+ case 0:
+ /* no rerouting necessary */
+ return 0;
+ case INTEL_IRQ_REROUTE_VARIANT:
+ /*
+ * Remap according to INTx routing table in 6700PXH
+ * specs, intel order number 302628-002, section
+ * 2.15.2. Other chipsets (80332, ...) have the same
+ * mapping and are handled here as well.
+ */
+ dev_info(&dev->dev, "PCI IRQ %d -> rerouted to legacy "
+ "IRQ %d\n", entry->index,
+ (entry->index % 4) + 16);
+ entry->index = (entry->index % 4) + 16;
+ return 1;
+ default:
+ dev_warn(&dev->dev, "Cannot reroute IRQ %d to legacy "
+ "IRQ: unknown mapping\n", entry->index);
+ return -1;
+ }
+ }
+}
+#endif /* CONFIG_X86_IO_APIC */
+
static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
{
struct acpi_prt_entry *entry;
@@ -311,6 +366,9 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
entry = acpi_pci_irq_find_prt_entry(dev, pin);
if (entry) {
+#ifdef CONFIG_X86_IO_APIC
+ acpi_reroute_boot_interrupt(dev, entry);
+#endif /* CONFIG_X86_IO_APIC */
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n",
pci_name(dev), pin_name(pin)));
return entry;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d06078d660a..2672c798272 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -485,7 +485,8 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
root->secondary.end = 0xFF;
printk(KERN_WARNING FW_BUG PREFIX
"no secondary bus range in _CRS\n");
- status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN,
+ NULL, &bus);
if (ACPI_SUCCESS(status))
root->secondary.start = bus;
else if (status == AE_NOT_FOUND)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 79cb6533289..870550d6a4b 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -244,7 +244,7 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
return result;
}
-struct thermal_cooling_device_ops processor_cooling_ops = {
+const struct thermal_cooling_device_ops processor_cooling_ops = {
.get_max_state = processor_get_max_state,
.get_cur_state = processor_get_cur_state,
.set_cur_state = processor_set_cur_state,
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 50658ff887d..6e36d0c0057 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -130,6 +130,9 @@ struct acpi_sbs {
#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
+static int acpi_sbs_remove(struct acpi_device *device, int type);
+static int acpi_battery_get_state(struct acpi_battery *battery);
+
static inline int battery_scale(int log)
{
int scale = 1;
@@ -195,6 +198,8 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy,
if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT)
return -ENODEV;
+
+ acpi_battery_get_state(battery);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
if (battery->rate_now < 0)
@@ -225,11 +230,17 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_POWER_NOW:
val->intval = abs(battery->rate_now) *
acpi_battery_ipscale(battery) * 1000;
+ val->intval *= (acpi_battery_mode(battery)) ?
+ (battery->voltage_now *
+ acpi_battery_vscale(battery) / 1000) : 1;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_POWER_AVG:
val->intval = abs(battery->rate_avg) *
acpi_battery_ipscale(battery) * 1000;
+ val->intval *= (acpi_battery_mode(battery)) ?
+ (battery->voltage_now *
+ acpi_battery_vscale(battery) / 1000) : 1;
break;
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = battery->state_of_charge;
@@ -903,8 +914,6 @@ static void acpi_sbs_callback(void *context)
}
}
-static int acpi_sbs_remove(struct acpi_device *device, int type);
-
static int acpi_sbs_add(struct acpi_device *device)
{
struct acpi_sbs *sbs;
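The POWER_NOW/POWER_AVG change above multiplies the SBS rate reading by the present voltage in the branch where the battery exposes power rather than current properties; dimensionally, a rate in µA times a voltage in V gives the µW value the power-supply class expects. A stand-alone arithmetic sketch with invented readings and unit scales of 1 (the integer division by 1000 mirrors the driver code):

```c
#include <stdio.h>

int main(void)
{
	/* Invented readings: 1200 mA at 11100 mV, ipscale = vscale = 1. */
	int rate_mA = 1200, voltage_mV = 11100, ipscale = 1, vscale = 1;

	/* Same arithmetic as the POWER_NOW branch above:
	 *   uA = mA * ipscale * 1000
	 *   V  = mV * vscale / 1000   (integer division, as in the driver)
	 *   uW = uA * V
	 */
	long long power_uW = (long long)rate_mA * ipscale * 1000
			   * (voltage_mV * vscale / 1000);

	printf("%lld uW (= %.2f W)\n", power_uW, power_uW / 1e6);
	return 0;
}
```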
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6c949602cbd..3ed80b2ca90 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -428,6 +428,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
},
},
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus A8N-SLI DELUXE",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus A8N-SLI Premium",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 77255f250db..c538d0ef10f 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -149,12 +149,12 @@ static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
return result;
}
-static struct kernel_param_ops param_ops_debug_layer = {
+static const struct kernel_param_ops param_ops_debug_layer = {
.set = param_set_uint,
.get = param_get_debug_layer,
};
-static struct kernel_param_ops param_ops_debug_level = {
+static const struct kernel_param_ops param_ops_debug_level = {
.set = param_set_uint,
.get = param_get_debug_level,
};
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 2607e17b520..48fbc647b17 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -812,7 +812,7 @@ acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
thermal_zone_unbind_cooling_device);
}
-static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
+static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.bind = acpi_thermal_bind_cooling_device,
.unbind = acpi_thermal_unbind_cooling_device,
.get_temp = thermal_get_temp,
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index ada4b4d9bdc..08a44b532f7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -307,7 +307,7 @@ video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long st
return acpi_video_device_lcd_set_level(video, level);
}
-static struct thermal_cooling_device_ops video_cooling_ops = {
+static const struct thermal_cooling_device_ops video_cooling_ops = {
.get_max_state = video_get_max_state,
.get_cur_state = video_get_cur_state,
.set_cur_state = video_set_cur_state,
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index e0a5b555cee..bb7c5f1085c 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -218,12 +218,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
ata_acpi_uevent(dev->link->ap, dev, event);
}
-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
.handler = ata_acpi_dev_notify_dock,
.uevent = ata_acpi_dev_uevent,
};
-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
.handler = ata_acpi_ap_notify_dock,
.uevent = ata_acpi_ap_uevent,
};
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index b89fffc1d77..33e1bed68fd 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -166,7 +166,7 @@ static int create_path(const char *nodepath)
{
char *path;
char *s;
- int err;
+ int err = 0;
/* parent directories do not exist, create them */
path = kstrdup(nodepath, GFP_KERNEL);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index be8714aa9dd..e18566a0fed 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -80,7 +80,6 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
struct generic_pm_domain *parent = genpd->parent;
- DEFINE_WAIT(wait);
int ret = 0;
start:
@@ -112,7 +111,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
}
if (genpd->power_on) {
- int ret = genpd->power_on(genpd);
+ ret = genpd->power_on(genpd);
if (ret)
goto out;
}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8dc247c974a..acb3f83b807 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -226,11 +226,17 @@ static int rpm_idle(struct device *dev, int rpmflags)
callback = NULL;
if (callback) {
- spin_unlock_irq(&dev->power.lock);
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
callback(dev);
- spin_lock_irq(&dev->power.lock);
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
}
dev->power.idle_notification = false;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 49502bc5360..423fd56bf61 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -616,5 +616,16 @@ config MSM_SMD_PKT
Enables userspace clients to read and write to some packet SMD
ports via device interface for MSM chipset.
+config TILE_SROM
+ bool "Character-device access via hypervisor to the Tilera SPI ROM"
+ depends on TILE
+ default y
+ ---help---
+ This device provides character-level read-write access
+ to the SROM, typically via the "0", "1", and "2" devices
+ in /dev/srom/. The Tilera hypervisor makes the flash
+ device appear much like a simple EEPROM, and knows
+ how to partition a single ROM for multiple purposes.
+
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7a00672bd85..32762ba769c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -63,3 +63,5 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o
obj-$(CONFIG_JS_RTC) += js-rtc.o
js-rtc-y = rtc.o
+
+obj-$(CONFIG_TILE_SROM) += tile-srom.o
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index fca0c51bbc9..810aff9e750 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -147,6 +147,14 @@ static int __init ramoops_probe(struct platform_device *pdev)
cxt->phys_addr = pdata->mem_address;
cxt->record_size = pdata->record_size;
cxt->dump_oops = pdata->dump_oops;
+ /*
+ * Update the module parameter variables as well so they are visible
+ * through /sys/module/ramoops/parameters/
+ */
+ mem_size = pdata->mem_size;
+ mem_address = pdata->mem_address;
+ record_size = pdata->record_size;
+ dump_oops = pdata->dump_oops;
if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
pr_err("request mem region failed\n");
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 729281961f2..c35a785005b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1300,345 +1300,14 @@ ctl_table random_table[] = {
};
#endif /* CONFIG_SYSCTL */
-/********************************************************************
- *
- * Random functions for networking
- *
- ********************************************************************/
-
-/*
- * TCP initial sequence number picking. This uses the random number
- * generator to pick an initial secret value. This value is hashed
- * along with the TCP endpoint information to provide a unique
- * starting point for each pair of TCP endpoints. This defeats
- * attacks which rely on guessing the initial TCP sequence number.
- * This algorithm was suggested by Steve Bellovin.
- *
- * Using a very strong hash was taking an appreciable amount of the total
- * TCP connection establishment time, so this is a weaker hash,
- * compensated for by changing the secret periodically.
- */
-
-/* F, G and H are basic MD4 functions: selection, majority, parity */
-#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
-#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
-#define H(x, y, z) ((x) ^ (y) ^ (z))
-
-/*
- * The generic round function. The application is so specific that
- * we don't bother protecting all the arguments with parens, as is generally
- * good macro practice, in favor of extra legibility.
- * Rotation is separate from addition to prevent recomputation
- */
-#define ROUND(f, a, b, c, d, x, s) \
- (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
-#define K1 0
-#define K2 013240474631UL
-#define K3 015666365641UL
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-
-static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
-{
- __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
-
- /* Round 1 */
- ROUND(F, a, b, c, d, in[ 0] + K1, 3);
- ROUND(F, d, a, b, c, in[ 1] + K1, 7);
- ROUND(F, c, d, a, b, in[ 2] + K1, 11);
- ROUND(F, b, c, d, a, in[ 3] + K1, 19);
- ROUND(F, a, b, c, d, in[ 4] + K1, 3);
- ROUND(F, d, a, b, c, in[ 5] + K1, 7);
- ROUND(F, c, d, a, b, in[ 6] + K1, 11);
- ROUND(F, b, c, d, a, in[ 7] + K1, 19);
- ROUND(F, a, b, c, d, in[ 8] + K1, 3);
- ROUND(F, d, a, b, c, in[ 9] + K1, 7);
- ROUND(F, c, d, a, b, in[10] + K1, 11);
- ROUND(F, b, c, d, a, in[11] + K1, 19);
-
- /* Round 2 */
- ROUND(G, a, b, c, d, in[ 1] + K2, 3);
- ROUND(G, d, a, b, c, in[ 3] + K2, 5);
- ROUND(G, c, d, a, b, in[ 5] + K2, 9);
- ROUND(G, b, c, d, a, in[ 7] + K2, 13);
- ROUND(G, a, b, c, d, in[ 9] + K2, 3);
- ROUND(G, d, a, b, c, in[11] + K2, 5);
- ROUND(G, c, d, a, b, in[ 0] + K2, 9);
- ROUND(G, b, c, d, a, in[ 2] + K2, 13);
- ROUND(G, a, b, c, d, in[ 4] + K2, 3);
- ROUND(G, d, a, b, c, in[ 6] + K2, 5);
- ROUND(G, c, d, a, b, in[ 8] + K2, 9);
- ROUND(G, b, c, d, a, in[10] + K2, 13);
-
- /* Round 3 */
- ROUND(H, a, b, c, d, in[ 3] + K3, 3);
- ROUND(H, d, a, b, c, in[ 7] + K3, 9);
- ROUND(H, c, d, a, b, in[11] + K3, 11);
- ROUND(H, b, c, d, a, in[ 2] + K3, 15);
- ROUND(H, a, b, c, d, in[ 6] + K3, 3);
- ROUND(H, d, a, b, c, in[10] + K3, 9);
- ROUND(H, c, d, a, b, in[ 1] + K3, 11);
- ROUND(H, b, c, d, a, in[ 5] + K3, 15);
- ROUND(H, a, b, c, d, in[ 9] + K3, 3);
- ROUND(H, d, a, b, c, in[ 0] + K3, 9);
- ROUND(H, c, d, a, b, in[ 4] + K3, 11);
- ROUND(H, b, c, d, a, in[ 8] + K3, 15);
-
- return buf[1] + b; /* "most hashed" word */
- /* Alternative: return sum of all words? */
-}
-#endif
-
-#undef ROUND
-#undef F
-#undef G
-#undef H
-#undef K1
-#undef K2
-#undef K3
-
-/* This should not be decreased so low that ISNs wrap too fast. */
-#define REKEY_INTERVAL (300 * HZ)
-/*
- * Bit layout of the tcp sequence numbers (before adding current time):
- * bit 24-31: increased after every key exchange
- * bit 0-23: hash(source,dest)
- *
- * The implementation is similar to the algorithm described
- * in the Appendix of RFC 1185, except that
- * - it uses a 1 MHz clock instead of a 250 kHz clock
- * - it performs a rekey every 5 minutes, which is equivalent
- * to a (source,dest) tulple dependent forward jump of the
- * clock by 0..2^(HASH_BITS+1)
- *
- * Thus the average ISN wraparound time is 68 minutes instead of
- * 4.55 hours.
- *
- * SMP cleanup and lock avoidance with poor man's RCU.
- * Manfred Spraul <manfred@colorfullife.com>
- *
- */
-#define COUNT_BITS 8
-#define COUNT_MASK ((1 << COUNT_BITS) - 1)
-#define HASH_BITS 24
-#define HASH_MASK ((1 << HASH_BITS) - 1)
+static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-static struct keydata {
- __u32 count; /* already shifted to the final position */
- __u32 secret[12];
-} ____cacheline_aligned ip_keydata[2];
-
-static unsigned int ip_cnt;
-
-static void rekey_seq_generator(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
-
-/*
- * Lock avoidance:
- * The ISN generation runs lockless - it's just a hash over random data.
- * State changes happen every 5 minutes when the random key is replaced.
- * Synchronization is performed by having two copies of the hash function
- * state and rekey_seq_generator always updates the inactive copy.
- * The copy is then activated by updating ip_cnt.
- * The implementation breaks down if someone blocks the thread
- * that processes SYN requests for more than 5 minutes. Should never
- * happen, and even if that happens only a not perfectly compliant
- * ISN is generated, nothing fatal.
- */
-static void rekey_seq_generator(struct work_struct *work)
+static int __init random_int_secret_init(void)
{
- struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
-
- get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
- keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
- smp_wmb();
- ip_cnt++;
- schedule_delayed_work(&rekey_work,
- round_jiffies_relative(REKEY_INTERVAL));
-}
-
-static inline struct keydata *get_keyptr(void)
-{
- struct keydata *keyptr = &ip_keydata[ip_cnt & 1];
-
- smp_rmb();
-
- return keyptr;
-}
-
-static __init int seqgen_init(void)
-{
- rekey_seq_generator(NULL);
+ get_random_bytes(random_int_secret, sizeof(random_int_secret));
return 0;
}
-late_initcall(seqgen_init);
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
- __be16 sport, __be16 dport)
-{
- __u32 seq;
- __u32 hash[12];
- struct keydata *keyptr = get_keyptr();
-
- /* The procedure is the same as for IPv4, but addresses are longer.
- * Thus we must use twothirdsMD4Transform.
- */
-
- memcpy(hash, saddr, 16);
- hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
- memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
-
- seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
- seq += keyptr->count;
-
- seq += ktime_to_ns(ktime_get_real());
-
- return seq;
-}
-EXPORT_SYMBOL(secure_tcpv6_sequence_number);
-#endif
-
-/* The code below is shamelessly stolen from secure_tcp_sequence_number().
- * All blames to Andrey V. Savochkin <saw@msu.ru>.
- */
-__u32 secure_ip_id(__be32 daddr)
-{
- struct keydata *keyptr;
- __u32 hash[4];
-
- keyptr = get_keyptr();
-
- /*
- * Pick a unique starting offset for each IP destination.
- * The dest ip address is placed in the starting vector,
- * which is then hashed with random data.
- */
- hash[0] = (__force __u32)daddr;
- hash[1] = keyptr->secret[9];
- hash[2] = keyptr->secret[10];
- hash[3] = keyptr->secret[11];
-
- return half_md4_transform(hash, keyptr->secret);
-}
-
-__u32 secure_ipv6_id(const __be32 daddr[4])
-{
- const struct keydata *keyptr;
- __u32 hash[4];
-
- keyptr = get_keyptr();
-
- hash[0] = (__force __u32)daddr[0];
- hash[1] = (__force __u32)daddr[1];
- hash[2] = (__force __u32)daddr[2];
- hash[3] = (__force __u32)daddr[3];
-
- return half_md4_transform(hash, keyptr->secret);
-}
-
-#ifdef CONFIG_INET
-
-__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport)
-{
- __u32 seq;
- __u32 hash[4];
- struct keydata *keyptr = get_keyptr();
-
- /*
- * Pick a unique starting offset for each TCP connection endpoints
- * (saddr, daddr, sport, dport).
- * Note that the words are placed into the starting vector, which is
- * then mixed with a partial MD4 over random data.
- */
- hash[0] = (__force u32)saddr;
- hash[1] = (__force u32)daddr;
- hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
- hash[3] = keyptr->secret[11];
-
- seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
- seq += keyptr->count;
- /*
- * As close as possible to RFC 793, which
- * suggests using a 250 kHz clock.
- * Further reading shows this assumes 2 Mb/s networks.
- * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
- * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
- * we also need to limit the resolution so that the u32 seq
- * overlaps less than one time per MSL (2 minutes).
- * Choosing a clock of 64 ns period is OK. (period of 274 s)
- */
- seq += ktime_to_ns(ktime_get_real()) >> 6;
-
- return seq;
-}
-
-/* Generate secure starting point for ephemeral IPV4 transport port search */
-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
-{
- struct keydata *keyptr = get_keyptr();
- u32 hash[4];
-
- /*
- * Pick a unique starting offset for each ephemeral port search
- * (saddr, daddr, dport) and 48bits of random data.
- */
- hash[0] = (__force u32)saddr;
- hash[1] = (__force u32)daddr;
- hash[2] = (__force u32)dport ^ keyptr->secret[10];
- hash[3] = keyptr->secret[11];
-
- return half_md4_transform(hash, keyptr->secret);
-}
-EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
- __be16 dport)
-{
- struct keydata *keyptr = get_keyptr();
- u32 hash[12];
-
- memcpy(hash, saddr, 16);
- hash[4] = (__force u32)dport;
- memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
-
- return twothirdsMD4Transform((const __u32 *)daddr, hash);
-}
-#endif
-
-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
-/* Similar to secure_tcp_sequence_number but generate a 48 bit value
- * bit's 32-47 increase every key exchange
- * 0-31 hash(source, dest)
- */
-u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport)
-{
- u64 seq;
- __u32 hash[4];
- struct keydata *keyptr = get_keyptr();
-
- hash[0] = (__force u32)saddr;
- hash[1] = (__force u32)daddr;
- hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
- hash[3] = keyptr->secret[11];
-
- seq = half_md4_transform(hash, keyptr->secret);
- seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
-
- seq += ktime_to_ns(ktime_get_real());
- seq &= (1ull << 48) - 1;
-
- return seq;
-}
-EXPORT_SYMBOL(secure_dccp_sequence_number);
-#endif
-
-#endif /* CONFIG_INET */
-
+late_initcall(random_int_secret_init);
/*
* Get a random word for internal kernel use only. Similar to urandom but
@@ -1646,17 +1315,15 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
* value is not cryptographically secure but for several uses the cost of
* depleting entropy is too high
*/
-DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
+DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
unsigned int get_random_int(void)
{
- struct keydata *keyptr;
__u32 *hash = get_cpu_var(get_random_int_hash);
- int ret;
+ unsigned int ret;
- keyptr = get_keyptr();
hash[0] += current->pid + jiffies + get_cycles();
-
- ret = half_md4_transform(hash, keyptr->secret);
+ md5_transform(hash, random_int_secret);
+ ret = hash[0];
put_cpu_var(get_random_int_hash);
return ret;
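
For reference, the reworked get_random_int() path reduces to the pattern below: a boot-time secret is mixed with cheap, frequently changing inputs through a one-way transform, and the digest is kept as running state. This is a standalone userspace sketch; mix() is only a placeholder for the kernel's md5_transform(), and one static buffer stands in for the per-CPU hash.

/*
 * Userspace sketch of the get_random_int() pattern after this change.
 * mix() is NOT md5_transform(); it is a stand-in so the example compiles
 * on its own.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint32_t secret[16];	/* stands in for random_int_secret[] */
static uint32_t hash[4];	/* stands in for get_random_int_hash  */

static void mix(uint32_t *h, const uint32_t *s)
{
	/* placeholder mixing step, not a cryptographic hash */
	for (int i = 0; i < 4; i++)
		h[i] = (h[i] ^ s[i * 4]) * 2654435761u + s[i * 4 + 1];
}

static unsigned int get_random_int_like(void)
{
	/* cheap, frequently changing inputs feed the running digest */
	hash[0] += (uint32_t)getpid() + (uint32_t)time(NULL) + (uint32_t)clock();
	mix(hash, secret);
	return hash[0];
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%08x\n", get_random_int_like());
	return 0;
}
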
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
new file mode 100644
index 00000000000..cf3ee008dca
--- /dev/null
+++ b/drivers/char/tile-srom.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * SPI Flash ROM driver
+ *
+ * This source code is derived from code provided in "Linux Device
+ * Drivers, Third Edition", by Jonathan Corbet, Alessandro Rubini, and
+ * Greg Kroah-Hartman, published by O'Reilly Media, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/proc_fs.h>
+#include <linux/fcntl.h> /* O_ACCMODE */
+#include <linux/aio.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <hv/hypervisor.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <hv/drv_srom_intf.h>
+
+/*
+ * Size of our hypervisor I/O requests. We break up large transfers
+ * so that we don't spend large uninterrupted spans of time in the
+ * hypervisor. Erasing an SROM sector takes a significant fraction of
+ * a second, so if we allowed the user to, say, do one I/O to write the
+ * entire ROM, we'd get soft lockup timeouts, or worse.
+ */
+#define SROM_CHUNK_SIZE ((size_t)4096)
+
+/*
+ * When hypervisor is busy (e.g. erasing), poll the status periodically.
+ */
+
+/*
+ * Interval to poll the state in msec
+ */
+#define SROM_WAIT_TRY_INTERVAL 20
+
+/*
+ * Maximum times to poll the state
+ */
+#define SROM_MAX_WAIT_TRY_TIMES 1000
+
+struct srom_dev {
+ int hv_devhdl; /* Handle for hypervisor device */
+ u32 total_size; /* Size of this device */
+ u32 sector_size; /* Size of a sector */
+ u32 page_size; /* Size of a page */
+ struct mutex lock; /* Allow only one accessor at a time */
+};
+
+static int srom_major; /* Dynamic major by default */
+module_param(srom_major, int, 0);
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+static int srom_devs; /* Number of SROM partitions */
+static struct cdev srom_cdev;
+static struct class *srom_class;
+static struct srom_dev *srom_devices;
+
+/*
+ * Handle calling the hypervisor and managing EAGAIN/EBUSY.
+ */
+
+static ssize_t _srom_read(int hv_devhdl, void *buf,
+ loff_t off, size_t count)
+{
+ int retval, retries = SROM_MAX_WAIT_TRY_TIMES;
+ for (;;) {
+ retval = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)buf,
+ count, off);
+ if (retval >= 0)
+ return retval;
+ if (retval == HV_EAGAIN)
+ continue;
+ if (retval == HV_EBUSY && --retries > 0) {
+ msleep(SROM_WAIT_TRY_INTERVAL);
+ continue;
+ }
+ pr_err("_srom_read: error %d\n", retval);
+ return -EIO;
+ }
+}
+
+static ssize_t _srom_write(int hv_devhdl, const void *buf,
+ loff_t off, size_t count)
+{
+ int retval, retries = SROM_MAX_WAIT_TRY_TIMES;
+ for (;;) {
+ retval = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)buf,
+ count, off);
+ if (retval >= 0)
+ return retval;
+ if (retval == HV_EAGAIN)
+ continue;
+ if (retval == HV_EBUSY && --retries > 0) {
+ msleep(SROM_WAIT_TRY_INTERVAL);
+ continue;
+ }
+ pr_err("_srom_write: error %d\n", retval);
+ return -EIO;
+ }
+}
+
+/**
+ * srom_open() - Device open routine.
+ * @inode: Inode for this device.
+ * @filp: File for this specific open of the device.
+ *
+ * Returns zero, or an error code.
+ */
+static int srom_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = &srom_devices[iminor(inode)];
+ return 0;
+}
+
+
+/**
+ * srom_release() - Device release routine.
+ * @inode: Inode for this device.
+ * @filp: File for this specific open of the device.
+ *
+ * Returns zero, or an error code.
+ */
+static int srom_release(struct inode *inode, struct file *filp)
+{
+ struct srom_dev *srom = filp->private_data;
+ char dummy;
+
+ /* Make sure we've flushed anything written to the ROM. */
+ mutex_lock(&srom->lock);
+ if (srom->hv_devhdl >= 0)
+ _srom_write(srom->hv_devhdl, &dummy, SROM_FLUSH_OFF, 1);
+ mutex_unlock(&srom->lock);
+
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+
+/**
+ * srom_read() - Read data from the device.
+ * @filp: File for this specific open of the device.
+ * @buf: User's data buffer.
+ * @count: Number of bytes requested.
+ * @f_pos: File position.
+ *
+ * Returns number of bytes read, or an error code.
+ */
+static ssize_t srom_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int retval = 0;
+ void *kernbuf;
+ struct srom_dev *srom = filp->private_data;
+
+ kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL);
+ if (!kernbuf)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&srom->lock)) {
+ retval = -ERESTARTSYS;
+ kfree(kernbuf);
+ return retval;
+ }
+
+ while (count) {
+ int hv_retval;
+ int bytes_this_pass = min(count, SROM_CHUNK_SIZE);
+
+ hv_retval = _srom_read(srom->hv_devhdl, kernbuf,
+ *f_pos, bytes_this_pass);
+ if (hv_retval > 0) {
+ if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
+ retval = -EFAULT;
+ break;
+ }
+ } else if (hv_retval <= 0) {
+ if (retval == 0)
+ retval = hv_retval;
+ break;
+ }
+
+ retval += hv_retval;
+ *f_pos += hv_retval;
+ buf += hv_retval;
+ count -= hv_retval;
+ }
+
+ mutex_unlock(&srom->lock);
+ kfree(kernbuf);
+
+ return retval;
+}
+
+/**
+ * srom_write() - Write data to the device.
+ * @filp: File for this specific open of the device.
+ * @buf: User's data buffer.
+ * @count: Number of bytes requested.
+ * @f_pos: File position.
+ *
+ * Returns number of bytes written, or an error code.
+ */
+static ssize_t srom_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int retval = 0;
+ void *kernbuf;
+ struct srom_dev *srom = filp->private_data;
+
+ kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL);
+ if (!kernbuf)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&srom->lock)) {
+ retval = -ERESTARTSYS;
+ kfree(kernbuf);
+ return retval;
+ }
+
+ while (count) {
+ int hv_retval;
+ int bytes_this_pass = min(count, SROM_CHUNK_SIZE);
+
+ if (copy_from_user(kernbuf, buf, bytes_this_pass) != 0) {
+ retval = -EFAULT;
+ break;
+ }
+
+ hv_retval = _srom_write(srom->hv_devhdl, kernbuf,
+ *f_pos, bytes_this_pass);
+ if (hv_retval <= 0) {
+ if (retval == 0)
+ retval = hv_retval;
+ break;
+ }
+
+ retval += hv_retval;
+ *f_pos += hv_retval;
+ buf += hv_retval;
+ count -= hv_retval;
+ }
+
+ mutex_unlock(&srom->lock);
+ kfree(kernbuf);
+
+ return retval;
+}
+
+/* Provide our own implementation so we can use srom->total_size. */
+loff_t srom_llseek(struct file *filp, loff_t offset, int origin)
+{
+ struct srom_dev *srom = filp->private_data;
+
+ if (mutex_lock_interruptible(&srom->lock))
+ return -ERESTARTSYS;
+
+ switch (origin) {
+ case SEEK_END:
+ offset += srom->total_size;
+ break;
+ case SEEK_CUR:
+ offset += filp->f_pos;
+ break;
+ }
+
+ if (offset < 0 || offset > srom->total_size) {
+ offset = -EINVAL;
+ } else {
+ filp->f_pos = offset;
+ filp->f_version = 0;
+ }
+
+ mutex_unlock(&srom->lock);
+
+ return offset;
+}
+
+static ssize_t total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srom_dev *srom = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", srom->total_size);
+}
+
+static ssize_t sector_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srom_dev *srom = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", srom->sector_size);
+}
+
+static ssize_t page_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srom_dev *srom = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", srom->page_size);
+}
+
+static struct device_attribute srom_dev_attrs[] = {
+ __ATTR(total_size, S_IRUGO, total_show, NULL),
+ __ATTR(sector_size, S_IRUGO, sector_show, NULL),
+ __ATTR(page_size, S_IRUGO, page_show, NULL),
+ __ATTR_NULL
+};
+
+static char *srom_devnode(struct device *dev, mode_t *mode)
+{
+ *mode = S_IRUGO | S_IWUSR;
+ return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
+}
+
+/*
+ * The fops
+ */
+static const struct file_operations srom_fops = {
+ .owner = THIS_MODULE,
+ .llseek = srom_llseek,
+ .read = srom_read,
+ .write = srom_write,
+ .open = srom_open,
+ .release = srom_release,
+};
+
+/**
+ * srom_setup_minor() - Initialize per-minor information.
+ * @srom: Per-device SROM state.
+ * @index: Device to set up.
+ */
+static int srom_setup_minor(struct srom_dev *srom, int index)
+{
+ struct device *dev;
+ int devhdl = srom->hv_devhdl;
+
+ mutex_init(&srom->lock);
+
+ if (_srom_read(devhdl, &srom->total_size,
+ SROM_TOTAL_SIZE_OFF, sizeof(srom->total_size)) < 0)
+ return -EIO;
+ if (_srom_read(devhdl, &srom->sector_size,
+ SROM_SECTOR_SIZE_OFF, sizeof(srom->sector_size)) < 0)
+ return -EIO;
+ if (_srom_read(devhdl, &srom->page_size,
+ SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0)
+ return -EIO;
+
+ dev = device_create(srom_class, &platform_bus,
+ MKDEV(srom_major, index), srom, "%d", index);
+ return IS_ERR(dev) ? PTR_ERR(dev) : 0;
+}
+
+/** srom_init() - Initialize the driver's module. */
+static int srom_init(void)
+{
+ int result, i;
+ dev_t dev = MKDEV(srom_major, 0);
+
+ /*
+ * Start with a plausible number of partitions; the krealloc() call
+ * below will yield about log(srom_devs) additional allocations.
+ */
+ srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL);
+
+ /* Discover the number of srom partitions. */
+ for (i = 0; ; i++) {
+ int devhdl;
+ char buf[20];
+ struct srom_dev *new_srom_devices =
+ krealloc(srom_devices, (i+1) * sizeof(struct srom_dev),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!new_srom_devices) {
+ result = -ENOMEM;
+ goto fail_mem;
+ }
+ srom_devices = new_srom_devices;
+ sprintf(buf, "srom/0/%d", i);
+ devhdl = hv_dev_open((HV_VirtAddr)buf, 0);
+ if (devhdl < 0) {
+ if (devhdl != HV_ENODEV)
+ pr_notice("srom/%d: hv_dev_open failed: %d.\n",
+ i, devhdl);
+ break;
+ }
+ srom_devices[i].hv_devhdl = devhdl;
+ }
+ srom_devs = i;
+
+ /* Bail out early if we have no partitions at all. */
+ if (srom_devs == 0) {
+ result = -ENODEV;
+ goto fail_mem;
+ }
+
+ /* Register our major, and accept a dynamic number. */
+ if (srom_major)
+ result = register_chrdev_region(dev, srom_devs, "srom");
+ else {
+ result = alloc_chrdev_region(&dev, 0, srom_devs, "srom");
+ srom_major = MAJOR(dev);
+ }
+ if (result < 0)
+ goto fail_mem;
+
+ /* Register a character device. */
+ cdev_init(&srom_cdev, &srom_fops);
+ srom_cdev.owner = THIS_MODULE;
+ srom_cdev.ops = &srom_fops;
+ result = cdev_add(&srom_cdev, dev, srom_devs);
+ if (result < 0)
+ goto fail_chrdev;
+
+ /* Create a sysfs class. */
+ srom_class = class_create(THIS_MODULE, "srom");
+ if (IS_ERR(srom_class)) {
+ result = PTR_ERR(srom_class);
+ goto fail_cdev;
+ }
+ srom_class->dev_attrs = srom_dev_attrs;
+ srom_class->devnode = srom_devnode;
+
+ /* Do per-partition initialization */
+ for (i = 0; i < srom_devs; i++) {
+ result = srom_setup_minor(srom_devices + i, i);
+ if (result < 0)
+ goto fail_class;
+ }
+
+ return 0;
+
+fail_class:
+ for (i = 0; i < srom_devs; i++)
+ device_destroy(srom_class, MKDEV(srom_major, i));
+ class_destroy(srom_class);
+fail_cdev:
+ cdev_del(&srom_cdev);
+fail_chrdev:
+ unregister_chrdev_region(dev, srom_devs);
+fail_mem:
+ kfree(srom_devices);
+ return result;
+}
+
+/** srom_cleanup() - Clean up the driver's module. */
+static void srom_cleanup(void)
+{
+ int i;
+ for (i = 0; i < srom_devs; i++)
+ device_destroy(srom_class, MKDEV(srom_major, i));
+ class_destroy(srom_class);
+ cdev_del(&srom_cdev);
+ unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs);
+ kfree(srom_devices);
+}
+
+module_init(srom_init);
+module_exit(srom_cleanup);
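
A possible userspace consumer of this driver, assuming the device node layout implied by srom_devnode() ("srom/<minor>") and a sysfs path of /sys/class/srom/<minor>/ for the attributes above; both paths are inferred from this diff, not documented guarantees.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char line[64];
	unsigned char data[16];
	ssize_t n, i;
	int fd;

	/* report the partition size exposed through the sysfs attribute */
	FILE *f = fopen("/sys/class/srom/0/total_size", "r");
	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("partition 0 size: %s", line);
		fclose(f);
	}

	/* read the first bytes; the driver forwards this to the hypervisor
	 * in 4 KB chunks */
	fd = open("/dev/srom/0", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/srom/0");
		return EXIT_FAILURE;
	}
	n = read(fd, data, sizeof(data));
	for (i = 0; i < n; i++)
		printf("%02x ", data[i]);
	putchar('\n');
	close(fd);
	return 0;
}
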
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7fc2f108f49..3f4051a7c5a 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -80,7 +80,7 @@ enum tis_defaults {
static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);
-#ifdef CONFIG_PNP
+#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
struct acpi_device *acpi = pnp_acpi_device(dev);
@@ -93,6 +93,11 @@ static int is_itpm(struct pnp_dev *dev)
return 0;
}
+#else
+static inline int is_itpm(struct pnp_dev *dev)
+{
+ return 0;
+}
#endif
static int check_locality(struct tpm_chip *chip, int l)
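
The tpm_tis fix above is the usual compile-time stub idiom: callers always see the function, but it only does real work when the needed config options are present. A generic, self-contained illustration (HAVE_QUIRK_DETECT and feature_quirk() are made-up names, not part of the patch):

#include <stdio.h>

#if defined(HAVE_QUIRK_DETECT)
static int feature_quirk(void)
{
	return 1;	/* real detection would go here */
}
#else
static inline int feature_quirk(void)
{
	return 0;	/* quirk never applies in this configuration */
}
#endif

int main(void)
{
	printf("quirk: %d\n", feature_quirk());
	return 0;
}
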
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 3ee1fdb31ea..e55814bc0d0 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -57,6 +57,7 @@ void proc_fork_connector(struct task_struct *task)
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE];
struct timespec ts;
+ struct task_struct *parent;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
@@ -67,8 +68,11 @@ void proc_fork_connector(struct task_struct *task)
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
ev->what = PROC_EVENT_FORK;
- ev->event_data.fork.parent_pid = task->real_parent->pid;
- ev->event_data.fork.parent_tgid = task->real_parent->tgid;
+ rcu_read_lock();
+ parent = rcu_dereference(task->real_parent);
+ ev->event_data.fork.parent_pid = parent->pid;
+ ev->event_data.fork.parent_tgid = parent->tgid;
+ rcu_read_unlock();
ev->event_data.fork.child_pid = task->pid;
ev->event_data.fork.child_tgid = task->tgid;
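
The cn_proc fix above is the standard RCU reader pattern for task_struct->real_parent, which the exit path may switch underneath the reader. A minimal, illustrative module using the same pattern (not part of the patch):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>

static int __init rcu_parent_demo_init(void)
{
	struct task_struct *parent;

	rcu_read_lock();
	/* sample the parent pointer under the RCU read-side lock */
	parent = rcu_dereference(current->real_parent);
	pr_info("pid %d has parent pid %d\n", current->pid, parent->pid);
	rcu_read_unlock();

	return 0;
}

static void __exit rcu_parent_demo_exit(void)
{
}

module_init(rcu_parent_demo_init);
module_exit(rcu_parent_demo_exit);
MODULE_LICENSE("GPL");
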
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index bf5092455a8..d4c54237288 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -25,9 +25,19 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
-static void (*pm_idle_old)(void);
static int enabled_devices;
+static int off __read_mostly;
+static int initialized __read_mostly;
+
+int cpuidle_disabled(void)
+{
+ return off;
+}
+void disable_cpuidle(void)
+{
+ off = 1;
+}
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
@@ -46,25 +56,23 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
* cpuidle_idle_call - the main idle loop
*
* NOTE: no locks or semaphores should be used here
+ * return non-zero on failure
*/
-static void cpuidle_idle_call(void)
+int cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_state *target_state;
int next_state;
+ if (off)
+ return -ENODEV;
+
+ if (!initialized)
+ return -ENODEV;
+
/* check if the device is ready */
- if (!dev || !dev->enabled) {
- if (pm_idle_old)
- pm_idle_old();
- else
-#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
- default_idle();
-#else
- local_irq_enable();
-#endif
- return;
- }
+ if (!dev || !dev->enabled)
+ return -EBUSY;
#if 0
/* shows regressions, re-enable for 2.6.29 */
@@ -89,7 +97,7 @@ static void cpuidle_idle_call(void)
next_state = cpuidle_curr_governor->select(dev);
if (need_resched()) {
local_irq_enable();
- return;
+ return 0;
}
target_state = &dev->states[next_state];
@@ -114,6 +122,8 @@ static void cpuidle_idle_call(void)
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev);
+
+ return 0;
}
/**
@@ -121,10 +131,10 @@ static void cpuidle_idle_call(void)
*/
void cpuidle_install_idle_handler(void)
{
- if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
+ if (enabled_devices) {
/* Make sure all changes finished before we switch to new idle */
smp_wmb();
- pm_idle = cpuidle_idle_call;
+ initialized = 1;
}
}
@@ -133,8 +143,8 @@ void cpuidle_install_idle_handler(void)
*/
void cpuidle_uninstall_idle_handler(void)
{
- if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
- pm_idle = pm_idle_old;
+ if (enabled_devices) {
+ initialized = 0;
cpuidle_kick_cpus();
}
}
@@ -427,7 +437,8 @@ static int __init cpuidle_init(void)
{
int ret;
- pm_idle_old = pm_idle;
+ if (cpuidle_disabled())
+ return -ENODEV;
ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
if (ret)
@@ -438,4 +449,5 @@ static int __init cpuidle_init(void)
return 0;
}
+module_param(off, int, 0444);
core_initcall(cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 33e50d556f1..38c3fd8b9d7 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -13,6 +13,7 @@ extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
extern spinlock_t cpuidle_driver_lock;
+extern int cpuidle_disabled(void);
/* idle loop */
extern void cpuidle_install_idle_handler(void);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index fd1601e3d12..3f7e3cedd13 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -26,6 +26,9 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
if (!drv)
return -EINVAL;
+ if (cpuidle_disabled())
+ return -ENODEV;
+
spin_lock(&cpuidle_driver_lock);
if (cpuidle_curr_driver) {
spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 724c164d31c..ea2f8e7aa24 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -81,6 +81,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
if (!gov || !gov->select)
return -EINVAL;
+ if (cpuidle_disabled())
+ return -ENODEV;
+
mutex_lock(&cpuidle_lock);
if (__cpuidle_find_governor(gov->name) == NULL) {
ret = 0;
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index a4af8589330..734ed0206cd 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
@@ -9,6 +9,5 @@ TODO for slave dma
- mxs-dma.c
- dw_dmac
- intel_mid_dma
- - ste_dma40
4. Check other subsystems for dma drivers and merge/move to dmaengine
5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e6d7228b147..196a7378d33 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -156,14 +156,10 @@ struct pl08x_driver_data {
#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
-/* Minimum period between work queue runs */
-#define PL08X_WQ_PERIODMIN 20
-
/* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE 0x2000
/* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define PL08X_MAX_ALLOCS 0x40
#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN 8
@@ -495,10 +491,10 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
struct pl08x_lli_build_data {
struct pl08x_txd *txd;
- struct pl08x_driver_data *pl08x;
struct pl08x_bus_data srcbus;
struct pl08x_bus_data dstbus;
size_t remainder;
+ u32 lli_bus;
};
/*
@@ -551,8 +547,7 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
llis_va[num_llis].src = bd->srcbus.addr;
llis_va[num_llis].dst = bd->dstbus.addr;
llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
- if (bd->pl08x->lli_buses & PL08X_AHB2)
- llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
+ llis_va[num_llis].lli |= bd->lli_bus;
if (cctl & PL080_CONTROL_SRC_INCR)
bd->srcbus.addr += len;
@@ -605,9 +600,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = txd->cctl;
bd.txd = txd;
- bd.pl08x = pl08x;
bd.srcbus.addr = txd->src_addr;
bd.dstbus.addr = txd->dst_addr;
+ bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
/* Find maximum width of the source bus */
bd.srcbus.maxwidth =
@@ -622,25 +617,15 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
/* Set up the bus widths to the maximum */
bd.srcbus.buswidth = bd.srcbus.maxwidth;
bd.dstbus.buswidth = bd.dstbus.maxwidth;
- dev_vdbg(&pl08x->adev->dev,
- "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
- __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
-
/*
* Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
*/
max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
PL080_CONTROL_TRANSFER_SIZE_MASK;
- dev_vdbg(&pl08x->adev->dev,
- "%s max bytes per lli = %zu\n",
- __func__, max_bytes_per_lli);
/* We need to count this down to zero */
bd.remainder = txd->len;
- dev_vdbg(&pl08x->adev->dev,
- "%s remainder = %zu\n",
- __func__, bd.remainder);
/*
* Choose bus to align to
@@ -649,6 +634,16 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
*/
pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
+ dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
+ bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+ bd.srcbus.buswidth,
+ bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+ bd.dstbus.buswidth,
+ bd.remainder, max_bytes_per_lli);
+ dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+ mbus == &bd.srcbus ? "src" : "dst",
+ sbus == &bd.srcbus ? "src" : "dst");
+
if (txd->len < mbus->buswidth) {
/* Less than a bus width available - send as single bytes */
while (bd.remainder) {
@@ -840,15 +835,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
{
int i;
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl");
for (i = 0; i < num_llis; i++) {
dev_vdbg(&pl08x->adev->dev,
- "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
- i,
- &llis_va[i],
- llis_va[i].src,
- llis_va[i].dst,
- llis_va[i].cctl,
- llis_va[i].lli
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, &llis_va[i], llis_va[i].src,
+ llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
);
}
}
@@ -1054,64 +1048,105 @@ pl08x_dma_tx_status(struct dma_chan *chan,
/* PrimeCell DMA extension */
struct burst_table {
- int burstwords;
+ u32 burstwords;
u32 reg;
};
static const struct burst_table burst_sizes[] = {
{
.burstwords = 256,
- .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_256,
},
{
.burstwords = 128,
- .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_128,
},
{
.burstwords = 64,
- .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_64,
},
{
.burstwords = 32,
- .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_32,
},
{
.burstwords = 16,
- .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_16,
},
{
.burstwords = 8,
- .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_8,
},
{
.burstwords = 4,
- .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_4,
},
{
- .burstwords = 1,
- .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .burstwords = 0,
+ .reg = PL080_BSIZE_1,
},
};
+/*
+ * Given the source and destination available bus masks, select which
+ * will be routed to each port. We try to have source and destination
+ * on separate ports, but always respect the allowable settings.
+ */
+static u32 pl08x_select_bus(u8 src, u8 dst)
+{
+ u32 cctl = 0;
+
+ if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
+ cctl |= PL080_CONTROL_DST_AHB2;
+ if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
+ cctl |= PL080_CONTROL_SRC_AHB2;
+
+ return cctl;
+}
+
+static u32 pl08x_cctl(u32 cctl)
+{
+ cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
+ PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
+ PL080_CONTROL_PROT_MASK);
+
+ /* Access the cell in privileged mode, non-bufferable, non-cacheable */
+ return cctl | PL080_CONTROL_PROT_SYS;
+}
+
+static u32 pl08x_width(enum dma_slave_buswidth width)
+{
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return PL080_WIDTH_8BIT;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return PL080_WIDTH_16BIT;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return PL080_WIDTH_32BIT;
+ default:
+ return ~0;
+ }
+}
+
+static u32 pl08x_burst(u32 maxburst)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
+ if (burst_sizes[i].burstwords <= maxburst)
+ break;
+
+ return burst_sizes[i].reg;
+}
+
static int dma_set_runtime_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
- struct pl08x_channel_data *cd = plchan->cd;
enum dma_slave_buswidth addr_width;
- dma_addr_t addr;
- u32 maxburst;
+ u32 width, burst, maxburst;
u32 cctl = 0;
- int i;
if (!plchan->slave)
return -EINVAL;
@@ -1119,11 +1154,9 @@ static int dma_set_runtime_config(struct dma_chan *chan,
/* Transfer direction */
plchan->runtime_direction = config->direction;
if (config->direction == DMA_TO_DEVICE) {
- addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
} else if (config->direction == DMA_FROM_DEVICE) {
- addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
@@ -1132,46 +1165,40 @@ static int dma_set_runtime_config(struct dma_chan *chan,
return -EINVAL;
}
- switch (addr_width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- default:
+ width = pl08x_width(addr_width);
+ if (width == ~0) {
dev_err(&pl08x->adev->dev,
"bad runtime_config: alien address width\n");
return -EINVAL;
}
+ cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
+ cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
+
/*
- * Now decide on a maxburst:
* If this channel will only request single transfers, set this
* down to ONE element. Also select one element if no maxburst
* is specified.
*/
- if (plchan->cd->single || maxburst == 0) {
- cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
+ if (plchan->cd->single)
+ maxburst = 1;
+
+ burst = pl08x_burst(maxburst);
+ cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
+ cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
+
+ if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+ plchan->src_addr = config->src_addr;
+ plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
+ pl08x_select_bus(plchan->cd->periph_buses,
+ pl08x->mem_buses);
} else {
- for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
- if (burst_sizes[i].burstwords <= maxburst)
- break;
- cctl |= burst_sizes[i].reg;
+ plchan->dst_addr = config->dst_addr;
+ plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
+ pl08x_select_bus(pl08x->mem_buses,
+ plchan->cd->periph_buses);
}
- plchan->runtime_addr = addr;
-
- /* Modify the default channel data to fit PrimeCell request */
- cd->cctl = cctl;
-
dev_dbg(&pl08x->adev->dev,
"configured channel %s (%s) for %s, data width %d, "
"maxburst %d words, LE, CCTL=0x%08x\n",
@@ -1270,23 +1297,6 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
return 0;
}
-/*
- * Given the source and destination available bus masks, select which
- * will be routed to each port. We try to have source and destination
- * on separate ports, but always respect the allowable settings.
- */
-static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
-{
- u32 cctl = 0;
-
- if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
- cctl |= PL080_CONTROL_DST_AHB2;
- if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
- cctl |= PL080_CONTROL_SRC_AHB2;
-
- return cctl;
-}
-
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
unsigned long flags)
{
@@ -1338,8 +1348,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
if (pl08x->vd->dualmaster)
- txd->cctl |= pl08x_select_bus(pl08x,
- pl08x->mem_buses, pl08x->mem_buses);
+ txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
+ pl08x->mem_buses);
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
@@ -1356,7 +1366,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
- u8 src_buses, dst_buses;
int ret;
/*
@@ -1390,42 +1399,22 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
txd->direction = direction;
txd->len = sgl->length;
- txd->cctl = plchan->cd->cctl &
- ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
- PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
- PL080_CONTROL_PROT_MASK);
-
- /* Access the cell in privileged mode, non-bufferable, non-cacheable */
- txd->cctl |= PL080_CONTROL_PROT_SYS;
-
if (direction == DMA_TO_DEVICE) {
txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl |= PL080_CONTROL_SRC_INCR;
+ txd->cctl = plchan->dst_cctl;
txd->src_addr = sgl->dma_address;
- if (plchan->runtime_addr)
- txd->dst_addr = plchan->runtime_addr;
- else
- txd->dst_addr = plchan->cd->addr;
- src_buses = pl08x->mem_buses;
- dst_buses = plchan->cd->periph_buses;
+ txd->dst_addr = plchan->dst_addr;
} else if (direction == DMA_FROM_DEVICE) {
txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl |= PL080_CONTROL_DST_INCR;
- if (plchan->runtime_addr)
- txd->src_addr = plchan->runtime_addr;
- else
- txd->src_addr = plchan->cd->addr;
+ txd->cctl = plchan->src_cctl;
+ txd->src_addr = plchan->src_addr;
txd->dst_addr = sgl->dma_address;
- src_buses = plchan->cd->periph_buses;
- dst_buses = pl08x->mem_buses;
} else {
dev_err(&pl08x->adev->dev,
"%s direction unsupported\n", __func__);
return NULL;
}
- txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
-
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
return NULL;
@@ -1676,6 +1665,20 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
return mask ? IRQ_HANDLED : IRQ_NONE;
}
+static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
+{
+ u32 cctl = pl08x_cctl(chan->cd->cctl);
+
+ chan->slave = true;
+ chan->name = chan->cd->bus_id;
+ chan->src_addr = chan->cd->addr;
+ chan->dst_addr = chan->cd->addr;
+ chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
+ pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
+ chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
+ pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
+}
+
/*
* Initialise the DMAC memcpy/slave channels.
* Make a local wrapper to hold required data
@@ -1707,9 +1710,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
chan->state = PL08X_CHAN_IDLE;
if (slave) {
- chan->slave = true;
- chan->name = pl08x->pd->slave_channels[i].bus_id;
chan->cd = &pl08x->pd->slave_channels[i];
+ pl08x_dma_slave_init(chan);
} else {
chan->cd = &pl08x->pd->memcpy_channel;
chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
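
The burst lookup factored out into pl08x_burst() above is a first-match walk over a table ordered from largest to smallest, terminated by an entry that always matches. A standalone illustration (the encodings here are made up, not PL080 register values):

#include <stdio.h>
#include <stddef.h>

struct burst_entry {
	unsigned int words;	/* burst length in transfer widths */
	unsigned int reg;	/* register encoding for that length */
};

static const struct burst_entry bursts[] = {
	{ 256, 7 }, { 128, 6 }, { 64, 5 }, { 32, 4 },
	{ 16, 3 }, { 8, 2 }, { 4, 1 },
	{ 0, 0 },	/* terminator: always matches, means single transfers */
};

static unsigned int pick_burst(unsigned int maxburst)
{
	size_t i;

	for (i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++)
		if (bursts[i].words <= maxburst)
			break;
	return bursts[i].reg;
}

int main(void)
{
	printf("maxburst 100 -> encoding %u\n", pick_burst(100));	/* 64-word entry */
	printf("maxburst 1   -> encoding %u\n", pick_burst(1));		/* single transfers */
	return 0;
}
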
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 36144f88d71..6a483eac7b3 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
atdma->dma_common.cap_mask = pdata->cap_mask;
atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
- size = io->end - io->start + 1;
+ size = resource_size(io);
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
err = -EBUSY;
goto err_kfree;
@@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
atdma->regs = NULL;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(io->start, io->end - io->start + 1);
+ release_mem_region(io->start, resource_size(io));
kfree(atdma);
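
resource_size() used above is the same inclusive-bounds arithmetic the driver previously open-coded:

#include <linux/ioport.h>

static resource_size_t demo_size(const struct resource *res)
{
	/* identical to what resource_size(res) computes */
	return res->end - res->start + 1;
}
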
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a92d95eac86..4234f416ef1 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -41,6 +41,8 @@ struct coh901318_desc {
struct coh901318_lli *lli;
enum dma_data_direction dir;
unsigned long flags;
+ u32 head_config;
+ u32 head_ctrl;
};
struct coh901318_base {
@@ -661,6 +663,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
coh901318_desc_submit(cohc, cohd);
+ /* Program the transaction head */
+ coh901318_set_conf(cohc, cohd->head_config);
+ coh901318_set_ctrl(cohc, cohd->head_ctrl);
coh901318_prep_linked_list(cohc, cohd->lli);
/* start dma job on this channel */
@@ -1091,8 +1096,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
} else
goto err_direction;
- coh901318_set_conf(cohc, config);
-
/* The dma only supports transmitting packages up to
* MAX_DMA_PACKET_SIZE. Calculate to total number of
* dma elemts required to send the entire sg list
@@ -1129,16 +1132,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (ret)
goto err_lli_fill;
- /*
- * Set the default ctrl for the channel to the one from the lli,
- * things may have changed due to odd buffer alignment etc.
- */
- coh901318_set_ctrl(cohc, lli->control);
COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
+ cohd->head_config = config;
+ /*
+ * Set the default head ctrl for the channel to the one from the
+ * lli, things may have changed due to odd buffer alignment
+ * etc.
+ */
+ cohd->head_ctrl = lli->control;
cohd->dir = direction;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 48694c34d96..b48967b499d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -62,9 +62,9 @@
#include <linux/slab.h>
static DEFINE_MUTEX(dma_list_mutex);
+static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
-static struct idr dma_idr;
/* --- sysfs implementation --- */
@@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else if (err)
- pr_err("dmaengine: failed to get %s: (%d)\n",
- dma_chan_name(chan), err);
+ pr_debug("dmaengine: failed to get %s: (%d)\n",
+ dma_chan_name(chan), err);
else
break;
if (--device->privatecnt == 0)
@@ -1050,8 +1050,6 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
- idr_init(&dma_idr);
- mutex_init(&dma_list_mutex);
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
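
The dmaengine hunks above trade run-time initialization for static initializers, which are ready before any initcall runs. The two forms are equivalent in effect:

#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(example_idr);		/* initialized at compile time */
static DEFINE_MUTEX(example_lock);

/*
 * Run-time equivalent, only needed when the objects live in dynamically
 * allocated memory:
 *
 *	idr_init(&example_idr);
 *	mutex_init(&example_lock);
 */
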
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 0766c1e53b1..5d7a49bd7c2 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -902,7 +902,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
*
* Returns a valid DMA descriptor or %NULL in case of failure.
*/
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
dma_addr_t src, size_t len, unsigned long flags)
{
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 1eb60ded2f0..7bd7e98548c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1305,8 +1305,10 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_request_irq;
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
- if (!sdma->script_addrs)
+ if (!sdma->script_addrs) {
+ ret = -ENOMEM;
goto err_alloc;
+ }
if (of_id)
pdev->id_entry = of_id->data;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index f653517ef74..8a3fdd87db9 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
return -EAGAIN;
}
device->state = SUSPENDED;
- pci_set_drvdata(pci, device);
pci_save_state(pci);
pci_disable_device(pci);
pci_set_power_state(pci, PCI_D3hot);
@@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci)
}
device->state = RUNNING;
iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
- pci_set_drvdata(pci, device);
return 0;
}
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d845dc4b710..f519c93a61e 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -73,10 +73,10 @@
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
*/
-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 xor_idx_to_desc = 0xe0;
+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index fab37d1cf48..5e3a40f7994 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -72,6 +72,17 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index fd7d2b308cf..6815905a772 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1706,16 +1706,14 @@ static int __init ipu_probe(struct platform_device *pdev)
ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
/* Remap IPU common registers */
- ipu_data.reg_ipu = ioremap(mem_ipu->start,
- mem_ipu->end - mem_ipu->start + 1);
+ ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
if (!ipu_data.reg_ipu) {
ret = -ENOMEM;
goto err_ioremap_ipu;
}
/* Remap Image Converter and Image DMA Controller registers */
- ipu_data.reg_ic = ioremap(mem_ic->start,
- mem_ic->end - mem_ic->start + 1);
+ ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
if (!ipu_data.reg_ic) {
ret = -ENOMEM;
goto err_ioremap_ic;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 06f9f27dbe7..9a353c2216d 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1304,7 +1304,8 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- msp->xor_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ msp->xor_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (!msp->xor_base)
return -EBUSY;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 88aad4f5400..be641cbd36f 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
memset(mxs_chan->ccw, 0, PAGE_SIZE);
- ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
- 0, "mxs-dma", mxs_dma);
- if (ret)
- goto err_irq;
+ if (mxs_chan->chan_irq != NO_IRQ) {
+ ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
+ 0, "mxs-dma", mxs_dma);
+ if (ret)
+ goto err_irq;
+ }
ret = clk_enable(mxs_dma->clk);
if (ret)
@@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
mxs_dma_disable_chan(mxs_chan);
+ mxs_dma_reset_chan(mxs_chan);
break;
case DMA_PAUSE:
mxs_dma_pause_chan(mxs_chan);
@@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = {
}, {
.name = "mxs-dma-apbx",
.driver_data = MXS_DMA_APBX,
+ }, {
+ /* end of list */
}
};
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index ff5b38f9d45..1ac8d4b580b 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -45,7 +45,8 @@
#define DMA_STATUS_MASK_BITS 0x3
#define DMA_STATUS_SHIFT_BITS 16
#define DMA_STATUS_IRQ(x) (0x1 << (x))
-#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8))
+#define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8))
+#define DMA_STATUS2_ERR(x) (0x1 << (x))
#define DMA_DESC_WIDTH_SHIFT_BITS 12
#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
@@ -61,6 +62,9 @@
#define MAX_CHAN_NR 8
+#define DMA_MASK_CTL0_MODE 0x33333333
+#define DMA_MASK_CTL2_MODE 0x00003333
+
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
@@ -133,6 +137,7 @@ struct pch_dma {
#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14
+#define PCH_DMA_STS2 0x18
#define dma_readl(pd, name) \
readl((pd)->membase + PCH_DMA_##name)
@@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
struct pch_dma *pd = to_pd(chan->device);
u32 val;
+ int pos;
+
+ if (chan->chan_id < 8)
+ pos = chan->chan_id;
+ else
+ pos = chan->chan_id + 8;
val = dma_readl(pd, CTL2);
if (enable)
- val |= 0x1 << chan->chan_id;
+ val |= 0x1 << pos;
else
- val &= ~(0x1 << chan->chan_id);
+ val &= ~(0x1 << pos);
dma_writel(pd, CTL2, val);
@@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan)
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
struct pch_dma *pd = to_pd(chan->device);
u32 val;
+ u32 mask_mode;
+ u32 mask_ctl;
if (chan->chan_id < 8) {
val = dma_readl(pd, CTL0);
+ mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+ mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val &= mask_mode;
if (pd_chan->dir == DMA_TO_DEVICE)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS);
@@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan)
val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS));
+ val |= mask_ctl;
dma_writel(pd, CTL0, val);
} else {
int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
val = dma_readl(pd, CTL3);
+ mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch);
+ mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch));
+ val &= mask_mode;
if (pd_chan->dir == DMA_TO_DEVICE)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS);
else
val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS));
-
+ val |= mask_ctl;
dma_writel(pd, CTL3, val);
}
@@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
struct pch_dma *pd = to_pd(chan->device);
u32 val;
+ u32 mask_ctl;
+ u32 mask_dir;
if (chan->chan_id < 8) {
+ mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
+ DMA_CTL0_DIR_SHIFT_BITS);
val = dma_readl(pd, CTL0);
-
- val &= ~(DMA_CTL0_MODE_MASK_BITS <<
- (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val &= mask_dir;
val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
-
+ val |= mask_ctl;
dma_writel(pd, CTL0, val);
} else {
int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
-
+ mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch));
+ mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
+ DMA_CTL0_DIR_SHIFT_BITS);
val = dma_readl(pd, CTL3);
-
- val &= ~(DMA_CTL0_MODE_MASK_BITS <<
- (DMA_CTL0_BITS_PER_CH * ch));
+ val &= mask_dir;
val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
-
+ val |= mask_ctl;
dma_writel(pd, CTL3, val);
-
}
dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
chan->chan_id, val);
}
-static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
+static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
struct pch_dma *pd = to_pd(pd_chan->chan.device);
u32 val;
@@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}
+static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
+{
+ struct pch_dma *pd = to_pd(pd_chan->chan.device);
+ u32 val;
+
+ val = dma_readl(pd, STS2);
+ return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
+ DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
+}
+
static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
- if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
+ u32 sts;
+
+ if (pd_chan->chan.chan_id < 8)
+ sts = pdc_get_status0(pd_chan);
+ else
+ sts = pdc_get_status2(pd_chan);
+
+
+ if (sts == DMA_STATUS_IDLE)
return true;
else
return false;
@@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &tmp_list);
}
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
list_splice(&tmp_list, &pd_chan->free_list);
pd_chan->descs_allocated = i;
pd_chan->completed_cookie = chan->cookie = 1;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
pdc_enable_irq(chan, 1);
@@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan)
BUG_ON(!list_empty(&pd_chan->active_list));
BUG_ON(!list_empty(&pd_chan->queue));
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
list_splice_init(&pd_chan->free_list, &tmp_list);
pd_chan->descs_allocated = 0;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
pci_pool_free(pd->pool, desc, desc->txd.phys);
@@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
dma_cookie_t last_completed;
int ret;
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
last_completed = pd_chan->completed_cookie;
last_used = chan->cookie;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
ret = dma_async_is_complete(cookie, last_completed, last_used);
@@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
if (cmd != DMA_TERMINATE_ALL)
return -ENXIO;
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
@@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
list_for_each_entry_safe(desc, _d, &list, desc_node)
pdc_chain_complete(pd_chan, desc);
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
return 0;
}
@@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid)
struct pch_dma *pd = (struct pch_dma *)devid;
struct pch_dma_chan *pd_chan;
u32 sts0;
+ u32 sts2;
int i;
- int ret = IRQ_NONE;
+ int ret0 = IRQ_NONE;
+ int ret2 = IRQ_NONE;
sts0 = dma_readl(pd, STS0);
+ sts2 = dma_readl(pd, STS2);
dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
for (i = 0; i < pd->dma.chancnt; i++) {
pd_chan = &pd->channels[i];
- if (sts0 & DMA_STATUS_IRQ(i)) {
- if (sts0 & DMA_STATUS_ERR(i))
- set_bit(0, &pd_chan->err_status);
+ if (i < 8) {
+ if (sts0 & DMA_STATUS_IRQ(i)) {
+ if (sts0 & DMA_STATUS0_ERR(i))
+ set_bit(0, &pd_chan->err_status);
- tasklet_schedule(&pd_chan->tasklet);
- ret = IRQ_HANDLED;
- }
+ tasklet_schedule(&pd_chan->tasklet);
+ ret0 = IRQ_HANDLED;
+ }
+ } else {
+ if (sts2 & DMA_STATUS_IRQ(i - 8)) {
+ if (sts2 & DMA_STATUS2_ERR(i))
+ set_bit(0, &pd_chan->err_status);
+ tasklet_schedule(&pd_chan->tasklet);
+ ret2 = IRQ_HANDLED;
+ }
+ }
}
/* clear interrupt bits in status register */
- dma_writel(pd, STS0, sts0);
+ if (ret0)
+ dma_writel(pd, STS0, sts0);
+ if (ret2)
+ dma_writel(pd, STS2, sts2);
- return ret;
+ return ret0 | ret2;
}
#ifdef CONFIG_PM
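
The interrupt-handler rework above splits channel status across two registers: channels 0-7 are reported in the first, channels 8 and up in the second. A standalone sketch of the selection logic, with illustrative bit positions rather than the real PCH register layout:

#include <stdbool.h>
#include <stdio.h>

static bool chan_has_irq(unsigned int chan, unsigned int sts0, unsigned int sts2)
{
	if (chan < 8)
		return sts0 & (1u << chan);		/* first status register  */
	return sts2 & (1u << (chan - 8));		/* second status register */
}

int main(void)
{
	printf("%d\n", chan_has_irq(3, 1u << 3, 0));	/* channel 3 via STS0-like reg */
	printf("%d\n", chan_has_irq(9, 0, 1u << 1));	/* channel 9 via STS2-like reg */
	return 0;
}
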
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 6abe1ec1f2c..00eee59e8b3 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -82,7 +82,7 @@ struct dma_pl330_dmac {
spinlock_t pool_lock;
/* Peripheral channels connected to this DMAC */
- struct dma_pl330_chan peripherals[0]; /* keep at end */
+ struct dma_pl330_chan *peripherals; /* keep at end */
};
struct dma_pl330_desc {
@@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
- desc->req.rqtype = peri->rqtype;
- desc->req.peri = peri->peri_id;
+ if (peri) {
+ desc->req.rqtype = peri->rqtype;
+ desc->req.peri = peri->peri_id;
+ } else {
+ desc->req.rqtype = MEMTOMEM;
+ desc->req.peri = 0;
+ }
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
struct pl330_info *pi;
int burst;
- if (unlikely(!pch || !len || !peri))
+ if (unlikely(!pch || !len))
return NULL;
- if (peri->rqtype != MEMTOMEM)
+ if (peri && peri->rqtype != MEMTOMEM)
return NULL;
pi = &pch->dmac->pif;
@@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
int i, burst_size;
dma_addr_t addr;
- if (unlikely(!pch || !sgl || !sg_len))
+ if (unlikely(!pch || !sgl || !sg_len || !peri))
return NULL;
/* Make sure the direction is consistent */
@@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
struct dma_device *pd;
struct resource *res;
int i, ret, irq;
+ int num_chan;
pdat = adev->dev.platform_data;
- if (!pdat || !pdat->nr_valid_peri) {
- dev_err(&adev->dev, "platform data missing\n");
- return -ENODEV;
- }
-
/* Allocate a new DMAC and its Channels */
- pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
- + sizeof(*pdmac), GFP_KERNEL);
+ pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
dev_err(&adev->dev, "unable to allocate mem\n");
return -ENOMEM;
@@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi = &pdmac->pif;
pi->dev = &adev->dev;
pi->pl330_data = NULL;
- pi->mcbufsz = pdat->mcbuf_sz;
+ pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
request_mem_region(res->start, resource_size(res), "dma-pl330");
@@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- for (i = 0; i < pdat->nr_valid_peri; i++) {
- struct dma_pl330_peri *peri = &pdat->peri[i];
- pch = &pdmac->peripherals[i];
+ num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+ pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
- switch (peri->rqtype) {
- case MEMTOMEM:
+ for (i = 0; i < num_chan; i++) {
+ pch = &pdmac->peripherals[i];
+ if (pdat) {
+ struct dma_pl330_peri *peri = &pdat->peri[i];
+
+ switch (peri->rqtype) {
+ case MEMTOMEM:
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ break;
+ case MEMTODEV:
+ case DEVTOMEM:
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ break;
+ default:
+ dev_err(&adev->dev, "DEVTODEV Not Supported\n");
+ continue;
+ }
+ pch->chan.private = peri;
+ } else {
dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- break;
- case MEMTODEV:
- case DEVTOMEM:
- dma_cap_set(DMA_SLAVE, pd->cap_mask);
- break;
- default:
- dev_err(&adev->dev, "DEVTODEV Not Supported\n");
- continue;
+ pch->chan.private = NULL;
}
INIT_LIST_HEAD(&pch->work_list);
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
- pch->chan.private = peri;
pch->chan.device = pd;
pch->chan.chan_id = i;
pch->dmac = pdmac;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 02833004420..7f49235d14b 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -70,12 +70,36 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
- return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ return __raw_readl(addr);
+ else
+ return __raw_readw(addr);
}
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
- __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ __raw_writel(data, addr);
+ else
+ __raw_writew(data, addr);
+}
+
+static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+}
+
+static u32 chcr_read(struct sh_dmae_chan *sh_dc)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}
/*
@@ -120,7 +144,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ u32 chcr = chcr_read(sh_chan);
if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
return true; /* working */
@@ -130,8 +154,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
@@ -144,8 +167,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
@@ -169,18 +191,23 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ if (shdev->pdata->needs_tend_set)
+ sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
- chcr |= CHCR_DE | CHCR_IE;
- sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
+ chcr |= CHCR_DE | shdev->chcr_ie_bit;
+ chcr_write(sh_chan, chcr & ~CHCR_TE);
}
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
- chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
- sh_dmae_writel(sh_chan, chcr, CHCR);
+ chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+ chcr_write(sh_chan, chcr);
}
static void dmae_init(struct sh_dmae_chan *sh_chan)
@@ -192,7 +219,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
LOG2_DEFAULT_XFER_SIZE);
sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
- sh_dmae_writel(sh_chan, chcr, CHCR);
+ chcr_write(sh_chan, chcr);
}
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
@@ -202,23 +229,25 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
return -EBUSY;
sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
- sh_dmae_writel(sh_chan, val, CHCR);
+ chcr_write(sh_chan, val);
return 0;
}
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
u16 __iomem *addr = shdev->dmars;
- int shift = chan_pdata->dmars_bit;
+ unsigned int shift = chan_pdata->dmars_bit;
if (dmae_is_busy(sh_chan))
return -EBUSY;
+ if (pdata->no_dmars)
+ return 0;
+
/* in the case of a missing DMARS resource use first memory window */
if (!addr)
addr = (u16 __iomem *)shdev->chan_reg;
@@ -296,9 +325,7 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
static const struct sh_dmae_slave_config *sh_dmae_find_slave(
struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
- struct dma_device *dma_dev = sh_chan->common.device;
- struct sh_dmae_device *shdev = container_of(dma_dev,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
@@ -771,10 +798,8 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
spin_lock_bh(&sh_chan->desc_lock);
/* DMA work check */
- if (dmae_is_busy(sh_chan)) {
- spin_unlock_bh(&sh_chan->desc_lock);
- return;
- }
+ if (dmae_is_busy(sh_chan))
+ goto sh_chan_xfer_ld_queue_end;
/* Find the first not transferred descriptor */
list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -788,6 +813,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
break;
}
+sh_chan_xfer_ld_queue_end:
spin_unlock_bh(&sh_chan->desc_lock);
}
@@ -846,7 +872,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
spin_lock(&sh_chan->desc_lock);
- chcr = sh_dmae_readl(sh_chan, CHCR);
+ chcr = chcr_read(sh_chan);
if (chcr & CHCR_TE) {
/* DMA stop */
@@ -1144,6 +1170,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
/* platform data */
shdev->pdata = pdata;
+ if (pdata->chcr_offset)
+ shdev->chcr_offset = pdata->chcr_offset;
+ else
+ shdev->chcr_offset = CHCR;
+
+ if (pdata->chcr_ie_bit)
+ shdev->chcr_ie_bit = pdata->chcr_ie_bit;
+ else
+ shdev->chcr_ie_bit = CHCR_IE;
+
platform_set_drvdata(pdev, shdev);
pm_runtime_enable(&pdev->dev);
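
The probe hunk above only overrides the CHCR offset and the interrupt-enable bit when the platform data actually provides them, so boards that leave the new fields at zero keep the historical CHCR/CHCR_IE behaviour. A minimal, illustrative sketch of how a newer SoC's platform data might opt into the extended register layout; the field names (dmaor_is_32bit, needs_tend_set, no_dmars, chcr_offset, chcr_ie_bit) are the ones this patch reads, but the values below are made up:

    /* Illustrative values only; real offsets and bits come from the SoC manual. */
    static struct sh_dmae_pdata example_dmae_pdata = {
            /* ...channel and transfer-size tables omitted... */
            .dmaor_is_32bit = 1,        /* DMAOR must be accessed as a 32-bit register */
            .needs_tend_set = 1,        /* prime TEND with all-ones before setting CHCR_DE */
            .no_dmars       = 1,        /* request routing handled elsewhere, skip DMARS writes */
            .chcr_offset    = 0x14,     /* hypothetical CHCR position in the channel window */
            .chcr_ie_bit    = 1 << 5,   /* hypothetical interrupt-enable bit in CHCR */
    };

Leaving all five fields at zero gives exactly the old behaviour, which is what keeps existing boards working unchanged.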
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 5ae9fc51218..dc56576f9fd 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -47,10 +47,14 @@ struct sh_dmae_device {
struct list_head node;
u32 __iomem *chan_reg;
u16 __iomem *dmars;
+ unsigned int chcr_offset;
+ u32 chcr_ie_bit;
};
#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
+#define to_sh_dev(chan) container_of(chan->common.device,\
+ struct sh_dmae_device, common)
#endif /* __DMA_SHDMA_H */
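
The new to_sh_dev() helper simply walks from the struct dma_device embedded in the channel back to the enclosing sh_dmae_device, replacing the open-coded container_of() chains deleted above. The pattern itself, reduced to a hypothetical pair of structures:

    struct engine {                 /* what the core layer hands back to us */
            int id;
    };

    struct wrapper {                /* our private state around it */
            int private_state;
            struct engine common;
    };

    static struct wrapper *to_wrapper(struct engine *e)
    {
            /* container_of() subtracts offsetof(struct wrapper, common) from e */
            return container_of(e, struct wrapper, common);
    }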
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 29d1addbe0c..cd3a7c726bf 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/amba/bus.h>
#include <plat/ste_dma40.h>
@@ -45,9 +46,6 @@
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0
-/* Hardware designer of the block */
-#define D40_HW_DESIGNER 0x8
-
/**
* enum 40_command - The different commands and/or statuses.
*
@@ -186,6 +184,8 @@ struct d40_base;
* @log_def: Default logical channel settings.
* @lcla: Space for one dst src pair for logical channel transfers.
* @lcpa: Pointer to dst and src lcpa settings.
+ * @runtime_addr: runtime configured address.
+ * @runtime_direction: runtime configured direction.
*
* This struct can either "be" a logical or a physical channel.
*/
@@ -200,6 +200,7 @@ struct d40_chan {
struct dma_chan chan;
struct tasklet_struct tasklet;
struct list_head client;
+ struct list_head pending_queue;
struct list_head active;
struct list_head queue;
struct stedma40_chan_cfg dma_cfg;
@@ -645,7 +646,20 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
- list_add_tail(&desc->node, &d40c->queue);
+ list_add_tail(&desc->node, &d40c->pending_queue);
+}
+
+static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->pending_queue))
+ return NULL;
+
+ d = list_first_entry(&d40c->pending_queue,
+ struct d40_desc,
+ node);
+ return d;
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
@@ -802,6 +816,11 @@ static void d40_term_all(struct d40_chan *d40c)
d40_desc_free(d40c, d40d);
}
+ /* Release pending descriptors */
+ while ((d40d = d40_first_pending(d40c))) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
d40c->pending_tx = 0;
d40c->busy = false;
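
From this point on a descriptor submitted via tx_submit() only lands on pending_queue; nothing reaches the hardware until the client calls dma_async_issue_pending(), when the whole batch is spliced onto the run queue (see the d40_issue_pending hunk further down). A condensed sketch of that splice-on-issue step, with hypothetical channel-state names:

    /* Sketch of the pending_queue -> queue handover introduced by this patch. */
    static void issue_pending_sketch(struct chan_state *c)
    {
            unsigned long flags;

            spin_lock_irqsave(&c->lock, flags);

            /* hand everything staged by tx_submit over to the run queue */
            list_splice_tail_init(&c->pending_queue, &c->queue);

            /* only kick the hardware if it is not already working the queue */
            if (!c->busy)
                    start_next_job(c);

            spin_unlock_irqrestore(&c->lock, flags);
    }

Here struct chan_state and start_next_job() stand in for the driver's real d40_chan and d40_queue_start().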
@@ -2092,7 +2111,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
struct scatterlist *sg;
int i;
- sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
+ sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
for (i = 0; i < periods; i++) {
sg_dma_address(&sg[i]) = dma_addr;
sg_dma_len(&sg[i]) = period_len;
@@ -2152,24 +2171,87 @@ static void d40_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&d40c->lock, flags);
- /* Busy means that pending jobs are already being processed */
+ list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
+
+ /* Busy means that queued jobs are already being processed */
if (!d40c->busy)
(void) d40_queue_start(d40c);
spin_unlock_irqrestore(&d40c->lock, flags);
}
+static int
+dma40_config_to_halfchannel(struct d40_chan *d40c,
+ struct stedma40_half_channel_info *info,
+ enum dma_slave_buswidth width,
+ u32 maxburst)
+{
+ enum stedma40_periph_data_width addr_width;
+ int psize;
+
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ addr_width = STEDMA40_BYTE_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ addr_width = STEDMA40_HALFWORD_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ addr_width = STEDMA40_WORD_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ addr_width = STEDMA40_DOUBLEWORD_WIDTH;
+ break;
+ default:
+ dev_err(d40c->base->dev,
+ "illegal peripheral address width "
+ "requested (%d)\n",
+ width);
+ return -EINVAL;
+ }
+
+ if (chan_is_logical(d40c)) {
+ if (maxburst >= 16)
+ psize = STEDMA40_PSIZE_LOG_16;
+ else if (maxburst >= 8)
+ psize = STEDMA40_PSIZE_LOG_8;
+ else if (maxburst >= 4)
+ psize = STEDMA40_PSIZE_LOG_4;
+ else
+ psize = STEDMA40_PSIZE_LOG_1;
+ } else {
+ if (maxburst >= 16)
+ psize = STEDMA40_PSIZE_PHY_16;
+ else if (maxburst >= 8)
+ psize = STEDMA40_PSIZE_PHY_8;
+ else if (maxburst >= 4)
+ psize = STEDMA40_PSIZE_PHY_4;
+ else
+ psize = STEDMA40_PSIZE_PHY_1;
+ }
+
+ info->data_width = addr_width;
+ info->psize = psize;
+ info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+
+ return 0;
+}
+
/* Runtime reconfiguration extension */
-static void d40_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+static int d40_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
{
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
- enum dma_slave_buswidth config_addr_width;
+ enum dma_slave_buswidth src_addr_width, dst_addr_width;
dma_addr_t config_addr;
- u32 config_maxburst;
- enum stedma40_periph_data_width addr_width;
- int psize;
+ u32 src_maxburst, dst_maxburst;
+ int ret;
+
+ src_addr_width = config->src_addr_width;
+ src_maxburst = config->src_maxburst;
+ dst_addr_width = config->dst_addr_width;
+ dst_maxburst = config->dst_maxburst;
if (config->direction == DMA_FROM_DEVICE) {
dma_addr_t dev_addr_rx =
@@ -2188,8 +2270,11 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dir);
cfg->dir = STEDMA40_PERIPH_TO_MEM;
- config_addr_width = config->src_addr_width;
- config_maxburst = config->src_maxburst;
+ /* Configure the memory side */
+ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ dst_addr_width = src_addr_width;
+ if (dst_maxburst == 0)
+ dst_maxburst = src_maxburst;
} else if (config->direction == DMA_TO_DEVICE) {
dma_addr_t dev_addr_tx =
@@ -2208,68 +2293,39 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dir);
cfg->dir = STEDMA40_MEM_TO_PERIPH;
- config_addr_width = config->dst_addr_width;
- config_maxburst = config->dst_maxburst;
-
+ /* Configure the memory side */
+ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ src_addr_width = dst_addr_width;
+ if (src_maxburst == 0)
+ src_maxburst = dst_maxburst;
} else {
dev_err(d40c->base->dev,
"unrecognized channel direction %d\n",
config->direction);
- return;
+ return -EINVAL;
}
- switch (config_addr_width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- addr_width = STEDMA40_BYTE_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- addr_width = STEDMA40_HALFWORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- addr_width = STEDMA40_WORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_8_BYTES:
- addr_width = STEDMA40_DOUBLEWORD_WIDTH;
- break;
- default:
+ if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
dev_err(d40c->base->dev,
- "illegal peripheral address width "
- "requested (%d)\n",
- config->src_addr_width);
- return;
+ "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
+ src_maxburst,
+ src_addr_width,
+ dst_maxburst,
+ dst_addr_width);
+ return -EINVAL;
}
- if (chan_is_logical(d40c)) {
- if (config_maxburst >= 16)
- psize = STEDMA40_PSIZE_LOG_16;
- else if (config_maxburst >= 8)
- psize = STEDMA40_PSIZE_LOG_8;
- else if (config_maxburst >= 4)
- psize = STEDMA40_PSIZE_LOG_4;
- else
- psize = STEDMA40_PSIZE_LOG_1;
- } else {
- if (config_maxburst >= 16)
- psize = STEDMA40_PSIZE_PHY_16;
- else if (config_maxburst >= 8)
- psize = STEDMA40_PSIZE_PHY_8;
- else if (config_maxburst >= 4)
- psize = STEDMA40_PSIZE_PHY_4;
- else if (config_maxburst >= 2)
- psize = STEDMA40_PSIZE_PHY_2;
- else
- psize = STEDMA40_PSIZE_PHY_1;
- }
+ ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
+ src_addr_width,
+ src_maxburst);
+ if (ret)
+ return ret;
- /* Set up all the endpoint configs */
- cfg->src_info.data_width = addr_width;
- cfg->src_info.psize = psize;
- cfg->src_info.big_endian = false;
- cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
- cfg->dst_info.data_width = addr_width;
- cfg->dst_info.psize = psize;
- cfg->dst_info.big_endian = false;
- cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+ ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
+ dst_addr_width,
+ dst_maxburst);
+ if (ret)
+ return ret;
/* Fill in register values */
if (chan_is_logical(d40c))
@@ -2282,12 +2338,14 @@ static void d40_set_runtime_config(struct dma_chan *chan,
d40c->runtime_addr = config_addr;
d40c->runtime_direction = config->direction;
dev_dbg(d40c->base->dev,
- "configured channel %s for %s, data width %d, "
- "maxburst %d bytes, LE, no flow control\n",
+ "configured channel %s for %s, data width %d/%d, "
+ "maxburst %d/%d elements, LE, no flow control\n",
dma_chan_name(chan),
(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
- config_addr_width,
- config_maxburst);
+ src_addr_width, dst_addr_width,
+ src_maxburst, dst_maxburst);
+
+ return 0;
}
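
With this refactor DMA_SLAVE_CONFIG finally reports failures to the caller, and a client only has to describe the device side of the transfer: an unset memory-side width or burst is mirrored from the device side, provided src_maxburst * src_addr_width still equals dst_maxburst * dst_addr_width. A hedged example of configuring an RX (peripheral-to-memory) channel through the generic dmaengine interface; the FIFO address is made up and chan is assumed to have come from dma_request_channel():

    struct dma_slave_config cfg = {
            .direction      = DMA_FROM_DEVICE,
            .src_addr       = 0x80120000,                  /* hypothetical peripheral FIFO */
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .src_maxburst   = 8,
            /* dst_addr_width/dst_maxburst left at 0: the driver mirrors the source side */
    };
    int ret;

    ret = dmaengine_slave_config(chan, &cfg);
    if (ret)
            pr_err("slave config rejected: %d\n", ret);

Asking for, say, an 8-beat burst of 4-byte words on the source against an 8-beat burst of 2-byte words on the destination would now come back as -EINVAL instead of being silently misconfigured.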
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -2308,9 +2366,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
case DMA_RESUME:
return d40_resume(d40c);
case DMA_SLAVE_CONFIG:
- d40_set_runtime_config(chan,
+ return d40_set_runtime_config(chan,
(struct dma_slave_config *) arg);
- return 0;
default:
break;
}
@@ -2341,6 +2398,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
INIT_LIST_HEAD(&d40c->active);
INIT_LIST_HEAD(&d40c->queue);
+ INIT_LIST_HEAD(&d40c->pending_queue);
INIT_LIST_HEAD(&d40c->client);
tasklet_init(&d40c->tasklet, dma_tasklet,
@@ -2502,25 +2560,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
- static const struct d40_reg_val dma_id_regs[] = {
- /* Peripheral Id */
- { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
- { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
- /*
- * D40_DREG_PERIPHID2 Depends on HW revision:
- * DB8500ed has 0x0008,
- * ? has 0x0018,
- * DB8500v1 has 0x0028
- * DB8500v2 has 0x0038
- */
- { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
-
- /* PCell Id */
- { .reg = D40_DREG_CELLID0, .val = 0x000d},
- { .reg = D40_DREG_CELLID1, .val = 0x00f0},
- { .reg = D40_DREG_CELLID2, .val = 0x0005},
- { .reg = D40_DREG_CELLID3, .val = 0x00b1}
- };
struct stedma40_platform_data *plat_data;
struct clk *clk = NULL;
void __iomem *virtbase = NULL;
@@ -2529,8 +2568,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
int num_log_chans = 0;
int num_phy_chans;
int i;
- u32 val;
- u32 rev;
+ u32 pid;
+ u32 cid;
+ u8 rev;
clk = clk_get(&pdev->dev, NULL);
@@ -2554,32 +2594,32 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (!virtbase)
goto failure;
- /* HW version check */
- for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
- if (dma_id_regs[i].val !=
- readl(virtbase + dma_id_regs[i].reg)) {
- d40_err(&pdev->dev,
- "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
- dma_id_regs[i].val,
- dma_id_regs[i].reg,
- readl(virtbase + dma_id_regs[i].reg));
- goto failure;
- }
- }
+ /* This is just a regular AMBA PrimeCell ID actually */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
+ & 255) << (i * 8);
+ for (cid = 0, i = 0; i < 4; i++)
+ cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
+ & 255) << (i * 8);
- /* Get silicon revision and designer */
- val = readl(virtbase + D40_DREG_PERIPHID2);
-
- if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
- D40_HW_DESIGNER) {
+ if (cid != AMBA_CID) {
+ d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
+ goto failure;
+ }
+ if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
- val & D40_DREG_PERIPHID2_DESIGNER_MASK,
- D40_HW_DESIGNER);
+ AMBA_MANF_BITS(pid),
+ AMBA_VENDOR_ST);
goto failure;
}
-
- rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
- D40_DREG_PERIPHID2_REV_POS;
+ /*
+ * HW revision:
+ * DB8500ed has revision 0
+ * ? has revision 1
+ * DB8500v1 has revision 2
+ * DB8500v2 has revision 3
+ */
+ rev = AMBA_REV_BITS(pid);
/* The number of physical channels on this HW */
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
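
The ID check now relies on the standard AMBA PrimeCell layout: the four PeriphID bytes sit in the last 0x20 bytes of the region and the four CellID bytes in the last 0x10, one byte per 32-bit register. The same assembly step, pulled out into a standalone helper for clarity (illustrative name; AMBA_CID, AMBA_MANF_BITS() and AMBA_REV_BITS() come from <linux/amba/bus.h>):

    /* Gather a 32-bit PrimeCell ID word spread over four byte-wide registers. */
    static u32 read_primecell_word(void __iomem *base, resource_size_t size,
                                   unsigned int bytes_from_end)
    {
            u32 word = 0;
            int i;

            for (i = 0; i < 4; i++)
                    word |= (readl(base + size - bytes_from_end + 4 * i) & 255)
                            << (i * 8);
            return word;
    }

    /* pid = read_primecell_word(virtbase, resource_size(res), 0x20); */
    /* cid = read_primecell_word(virtbase, resource_size(res), 0x10); */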
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 195ee65ee7f..b44c455158d 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -184,9 +184,6 @@
#define D40_DREG_PERIPHID0 0xFE0
#define D40_DREG_PERIPHID1 0xFE4
#define D40_DREG_PERIPHID2 0xFE8
-#define D40_DREG_PERIPHID2_REV_POS 4
-#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
-#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
#define D40_DREG_PERIPHID3 0xFEC
#define D40_DREG_CELLID0 0xFF0
#define D40_DREG_CELLID1 0xFF4
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index 30da70d06a6..cdae207028a 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -45,13 +45,13 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
return 0;
}
-static struct pci_device_id __initdata pci_eisa_pci_tbl[] = {
+static struct pci_device_id pci_eisa_pci_tbl[] = {
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 },
{ 0, }
};
-static struct pci_driver __initdata pci_eisa_driver = {
+static struct pci_driver __refdata pci_eisa_driver = {
.name = "pci_eisa",
.id_table = pci_eisa_pci_tbl,
.probe = pci_eisa_init,
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 5f29aafd446..eb80b549ed8 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -78,6 +78,7 @@
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/pstore.h>
#include <asm/uaccess.h>
@@ -89,6 +90,8 @@ MODULE_DESCRIPTION("sysfs interface to EFI Variables");
MODULE_LICENSE("GPL");
MODULE_VERSION(EFIVARS_VERSION);
+#define DUMP_NAME_LEN 52
+
/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
@@ -119,6 +122,10 @@ struct efivar_attribute {
ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
};
+#define PSTORE_EFI_ATTRIBUTES \
+ (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+ EFI_VARIABLE_RUNTIME_ACCESS)
#define EFIVAR_ATTR(_name, _mode, _show, _store) \
struct efivar_attribute efivar_attr_##_name = { \
@@ -141,38 +148,72 @@ efivar_create_sysfs_entry(struct efivars *efivars,
/* Return the number of unicode characters in data */
static unsigned long
-utf8_strlen(efi_char16_t *data, unsigned long maxlength)
+utf16_strnlen(efi_char16_t *s, size_t maxlength)
{
unsigned long length = 0;
- while (*data++ != 0 && length < maxlength)
+ while (*s++ != 0 && length < maxlength)
length++;
return length;
}
+static inline unsigned long
+utf16_strlen(efi_char16_t *s)
+{
+ return utf16_strnlen(s, ~0UL);
+}
+
/*
* Return the number of bytes is the length of this string
* Note: this is NOT the same as the number of unicode characters
*/
static inline unsigned long
-utf8_strsize(efi_char16_t *data, unsigned long maxlength)
+utf16_strsize(efi_char16_t *data, unsigned long maxlength)
{
- return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
+ return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
+}
+
+static inline int
+utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
+{
+ while (1) {
+ if (len == 0)
+ return 0;
+ if (*a < *b)
+ return -1;
+ if (*a > *b)
+ return 1;
+ if (*a == 0) /* implies *b == 0 */
+ return 0;
+ a++;
+ b++;
+ len--;
+ }
}
static efi_status_t
-get_var_data(struct efivars *efivars, struct efi_variable *var)
+get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
{
efi_status_t status;
- spin_lock(&efivars->lock);
var->DataSize = 1024;
status = efivars->ops->get_variable(var->VariableName,
&var->VendorGuid,
&var->Attributes,
&var->DataSize,
var->Data);
+ return status;
+}
+
+static efi_status_t
+get_var_data(struct efivars *efivars, struct efi_variable *var)
+{
+ efi_status_t status;
+
+ spin_lock(&efivars->lock);
+ status = get_var_data_locked(efivars, var);
spin_unlock(&efivars->lock);
+
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
status);
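
These helpers treat EFI variable names as NUL-terminated UTF-16 (in practice UCS-2) strings; utf16_strncmp() is what the pstore code further down uses to match a "dump-type..." stub against stored variable names. A tiny illustration of that prefix test, with hand-built efi_char16_t buffers:

    efi_char16_t stored[] = { 'd', 'u', 'm', 'p', '-', '1', 0 };
    efi_char16_t stub[]   = { 'd', 'u', 'm', 'p', '-', 0 };

    /* returns 0 when the first utf16_strlen(stub) characters match,
     * i.e. "dump-" is a prefix of the stored name */
    if (!utf16_strncmp(stored, stub, utf16_strlen(stub)))
            /* stored is a candidate for the cleanup pass */;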
@@ -387,12 +428,180 @@ static struct kobj_type efivar_ktype = {
.default_attrs = def_attrs,
};
+static struct pstore_info efi_pstore_info;
+
static inline void
efivar_unregister(struct efivar_entry *var)
{
kobject_put(&var->kobj);
}
+#ifdef CONFIG_PSTORE
+
+static int efi_pstore_open(struct pstore_info *psi)
+{
+ struct efivars *efivars = psi->data;
+
+ spin_lock(&efivars->lock);
+ efivars->walk_entry = list_first_entry(&efivars->list,
+ struct efivar_entry, list);
+ return 0;
+}
+
+static int efi_pstore_close(struct pstore_info *psi)
+{
+ struct efivars *efivars = psi->data;
+
+ spin_unlock(&efivars->lock);
+ return 0;
+}
+
+static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+ struct timespec *timespec, struct pstore_info *psi)
+{
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ struct efivars *efivars = psi->data;
+ char name[DUMP_NAME_LEN];
+ int i;
+ unsigned int part, size;
+ unsigned long time;
+
+ while (&efivars->walk_entry->list != &efivars->list) {
+ if (!efi_guidcmp(efivars->walk_entry->var.VendorGuid,
+ vendor)) {
+ for (i = 0; i < DUMP_NAME_LEN; i++) {
+ name[i] = efivars->walk_entry->var.VariableName[i];
+ }
+ if (sscanf(name, "dump-type%u-%u-%lu", type, &part, &time) == 3) {
+ *id = part;
+ timespec->tv_sec = time;
+ timespec->tv_nsec = 0;
+ get_var_data_locked(efivars, &efivars->walk_entry->var);
+ size = efivars->walk_entry->var.DataSize;
+ memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+ efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
+ struct efivar_entry, list);
+ return size;
+ }
+ }
+ efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
+ struct efivar_entry, list);
+ }
+ return 0;
+}
+
+static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi)
+{
+ char name[DUMP_NAME_LEN];
+ char stub_name[DUMP_NAME_LEN];
+ efi_char16_t efi_name[DUMP_NAME_LEN];
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ struct efivars *efivars = psi->data;
+ struct efivar_entry *entry, *found = NULL;
+ int i;
+
+ sprintf(stub_name, "dump-type%u-%u-", type, part);
+ sprintf(name, "%s%lu", stub_name, get_seconds());
+
+ spin_lock(&efivars->lock);
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = stub_name[i];
+
+ /*
+ * Clean up any entries with the same name
+ */
+
+ list_for_each_entry(entry, &efivars->list, list) {
+ get_var_data_locked(efivars, &entry->var);
+
+ if (efi_guidcmp(entry->var.VendorGuid, vendor))
+ continue;
+ if (utf16_strncmp(entry->var.VariableName, efi_name,
+ utf16_strlen(efi_name)))
+ continue;
+ /* Needs to be a prefix */
+ if (entry->var.VariableName[utf16_strlen(efi_name)] == 0)
+ continue;
+
+ /* found */
+ found = entry;
+ efivars->ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ PSTORE_EFI_ATTRIBUTES,
+ 0, NULL);
+ }
+
+ if (found)
+ list_del(&found->list);
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = name[i];
+
+ efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
+ size, psi->buf);
+
+ spin_unlock(&efivars->lock);
+
+ if (found)
+ efivar_unregister(found);
+
+ if (size)
+ efivar_create_sysfs_entry(efivars,
+ utf16_strsize(efi_name,
+ DUMP_NAME_LEN * 2),
+ efi_name, &vendor);
+
+ return part;
+};
+
+static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi)
+{
+ efi_pstore_write(type, id, 0, psi);
+
+ return 0;
+}
+#else
+static int efi_pstore_open(struct pstore_info *psi)
+{
+ return 0;
+}
+
+static int efi_pstore_close(struct pstore_info *psi)
+{
+ return 0;
+}
+
+static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+ struct timespec *time, struct pstore_info *psi)
+{
+ return -1;
+}
+
+static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi)
+{
+ return 0;
+}
+
+static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi)
+{
+ return 0;
+}
+#endif
+
+static struct pstore_info efi_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = "efi",
+ .open = efi_pstore_open,
+ .close = efi_pstore_close,
+ .read = efi_pstore_read,
+ .write = efi_pstore_write,
+ .erase = efi_pstore_erase,
+};
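
Crash records are keyed entirely by variable name: efi_pstore_write() builds "dump-type%u-%u-<seconds>" with sprintf(), and efi_pstore_read() recovers the type, part and timestamp with the matching sscanf(). A small round-trip sketch of that naming scheme (the 52-byte buffer mirrors DUMP_NAME_LEN; the values are arbitrary):

    char name[52];
    unsigned int type = 2, part = 1, got_type, got_part;
    unsigned long stamp = 1312345678UL, got_stamp;

    sprintf(name, "dump-type%u-%u-%lu", type, part, stamp);

    /* the read side only accepts a record if all three fields parse */
    if (sscanf(name, "dump-type%u-%u-%lu", &got_type, &got_part, &got_stamp) == 3)
            /* got_type == 2, got_part == 1, got_stamp == 1312345678 */;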
static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -414,8 +623,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = utf8_strsize(new_var->VariableName, 1024);
+ strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
+ strsize2 = utf16_strsize(new_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
new_var->VariableName, strsize1) &&
@@ -447,8 +656,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
/* Create the entry in sysfs. Locking is not required here */
status = efivar_create_sysfs_entry(efivars,
- utf8_strsize(new_var->VariableName,
- 1024),
+ utf16_strsize(new_var->VariableName,
+ 1024),
new_var->VariableName,
&new_var->VendorGuid);
if (status) {
@@ -477,8 +686,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = utf8_strsize(del_var->VariableName, 1024);
+ strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
+ strsize2 = utf16_strsize(del_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
del_var->VariableName, strsize1) &&
@@ -763,6 +972,16 @@ int register_efivars(struct efivars *efivars,
if (error)
unregister_efivars(efivars);
+ efivars->efi_pstore_info = efi_pstore_info;
+
+ efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+ if (efivars->efi_pstore_info.buf) {
+ efivars->efi_pstore_info.bufsize = 1024;
+ efivars->efi_pstore_info.data = efivars;
+ mutex_init(&efivars->efi_pstore_info.buf_mutex);
+ pstore_register(&efivars->efi_pstore_info);
+ }
+
out:
kfree(variable_name);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 363498697c2..d539efd96d4 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -103,6 +103,22 @@ config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
+config GPIO_MSM_V1
+ tristate "Qualcomm MSM GPIO v1"
+ depends on GPIOLIB && ARCH_MSM
+ help
+ Say yes here to support the GPIO interface on ARM v6 based
+ Qualcomm MSM chips. Most of the pins on the MSM can be
+ selected for GPIO, and are controlled by this driver.
+
+config GPIO_MSM_V2
+ tristate "Qualcomm MSM GPIO v2"
+ depends on GPIOLIB && ARCH_MSM
+ help
+ Say yes here to support the GPIO interface on ARM v7 based
+ Qualcomm MSM chips. Most of the pins on the MSM can be
+ selected for GPIO, and are controlled by this driver.
+
config GPIO_MXC
def_bool y
depends on ARCH_MXC
@@ -280,6 +296,12 @@ config GPIO_TC3589X
This enables support for the GPIOs found on the TC3589X
I/O Expander.
+config GPIO_TPS65912
+ tristate "TI TPS65912 GPIO"
+ depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
+ help
+ This driver supports the TPS65912 GPIO chip.
+
config GPIO_TWL4030
tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
depends on TWL4030_CORE
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 72071125139..9588948c96f 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -27,6 +27,8 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
+obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o
+obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
@@ -48,6 +50,7 @@ obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
+obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_MACH_U300) += gpio-u300.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
index ed795e64eea..050c05d9189 100644
--- a/drivers/gpio/gpio-ab8500.c
+++ b/drivers/gpio/gpio-ab8500.c
@@ -516,5 +516,5 @@ module_exit(ab8500_gpio_exit);
MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO");
-MODULE_ALIAS("AB8500 GPIO driver");
+MODULE_ALIAS("platform:ab8500-gpio");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
new file mode 100644
index 00000000000..52a4d4286eb
--- /dev/null
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -0,0 +1,636 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <mach/cpu.h>
+#include <mach/msm_gpiomux.h>
+#include <mach/msm_iomap.h>
+
+/* see 80-VA736-2 Rev C pp 695-751
+**
+** These are actually the *shadow* gpio registers, since the
+** real ones (which allow full access) are only available to the
+** ARM9 side of the world.
+**
+** Since the _BASE addresses need to be page-aligned when we're mapping them
+** to virtual addresses, adjust for the additional offset in these
+** macros.
+*/
+
+#define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + (off))
+#define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off))
+#define MSM_GPIO1_SHADOW_REG(off) (MSM_GPIO1_BASE + 0x800 + (off))
+#define MSM_GPIO2_SHADOW_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off))
+
+/*
+ * MSM7X00 registers
+ */
+/* output value */
+#define MSM7X00_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */
+#define MSM7X00_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */
+#define MSM7X00_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */
+#define MSM7X00_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */
+#define MSM7X00_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 106-95 */
+#define MSM7X00_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x50) /* gpio 107-121 */
+
+/* same pin map as above, output enable */
+#define MSM7X00_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x10)
+#define MSM7X00_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08)
+#define MSM7X00_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x14)
+#define MSM7X00_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x18)
+#define MSM7X00_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x1C)
+#define MSM7X00_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x54)
+
+/* same pin map as above, input read */
+#define MSM7X00_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x34)
+#define MSM7X00_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20)
+#define MSM7X00_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x38)
+#define MSM7X00_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x3C)
+#define MSM7X00_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x40)
+#define MSM7X00_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x44)
+
+/* same pin map as above, 1=edge 0=level interrupt */
+#define MSM7X00_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x60)
+#define MSM7X00_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50)
+#define MSM7X00_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x64)
+#define MSM7X00_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x68)
+#define MSM7X00_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x6C)
+#define MSM7X00_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0xC0)
+
+/* same pin map as above, 1=positive 0=negative */
+#define MSM7X00_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x70)
+#define MSM7X00_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58)
+#define MSM7X00_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x74)
+#define MSM7X00_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x78)
+#define MSM7X00_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x7C)
+#define MSM7X00_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xBC)
+
+/* same pin map as above, interrupt enable */
+#define MSM7X00_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0x80)
+#define MSM7X00_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60)
+#define MSM7X00_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0x84)
+#define MSM7X00_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0x88)
+#define MSM7X00_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0x8C)
+#define MSM7X00_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xB8)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define MSM7X00_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0x90)
+#define MSM7X00_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68)
+#define MSM7X00_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0x94)
+#define MSM7X00_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0x98)
+#define MSM7X00_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0x9C)
+#define MSM7X00_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xB4)
+
+/* same pin map as above, 1=interrupt pending */
+#define MSM7X00_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xA0)
+#define MSM7X00_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70)
+#define MSM7X00_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xA4)
+#define MSM7X00_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xA8)
+#define MSM7X00_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xAC)
+#define MSM7X00_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0xB0)
+
+/*
+ * QSD8X50 registers
+ */
+/* output value */
+#define QSD8X50_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */
+#define QSD8X50_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */
+#define QSD8X50_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */
+#define QSD8X50_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */
+#define QSD8X50_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 103-95 */
+#define QSD8X50_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x10) /* gpio 121-104 */
+#define QSD8X50_GPIO_OUT_6 MSM_GPIO1_SHADOW_REG(0x14) /* gpio 152-122 */
+#define QSD8X50_GPIO_OUT_7 MSM_GPIO1_SHADOW_REG(0x18) /* gpio 164-153 */
+
+/* same pin map as above, output enable */
+#define QSD8X50_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x20)
+#define QSD8X50_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08)
+#define QSD8X50_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x24)
+#define QSD8X50_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x28)
+#define QSD8X50_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x2C)
+#define QSD8X50_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x30)
+#define QSD8X50_GPIO_OE_6 MSM_GPIO1_SHADOW_REG(0x34)
+#define QSD8X50_GPIO_OE_7 MSM_GPIO1_SHADOW_REG(0x38)
+
+/* same pin map as above, input read */
+#define QSD8X50_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x50)
+#define QSD8X50_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20)
+#define QSD8X50_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x54)
+#define QSD8X50_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x58)
+#define QSD8X50_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x5C)
+#define QSD8X50_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x60)
+#define QSD8X50_GPIO_IN_6 MSM_GPIO1_SHADOW_REG(0x64)
+#define QSD8X50_GPIO_IN_7 MSM_GPIO1_SHADOW_REG(0x68)
+
+/* same pin map as above, 1=edge 0=level interrupt */
+#define QSD8X50_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x70)
+#define QSD8X50_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50)
+#define QSD8X50_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x74)
+#define QSD8X50_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x78)
+#define QSD8X50_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x7C)
+#define QSD8X50_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0x80)
+#define QSD8X50_GPIO_INT_EDGE_6 MSM_GPIO1_SHADOW_REG(0x84)
+#define QSD8X50_GPIO_INT_EDGE_7 MSM_GPIO1_SHADOW_REG(0x88)
+
+/* same pin map as above, 1=positive 0=negative */
+#define QSD8X50_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x90)
+#define QSD8X50_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58)
+#define QSD8X50_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x94)
+#define QSD8X50_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x98)
+#define QSD8X50_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x9C)
+#define QSD8X50_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xA0)
+#define QSD8X50_GPIO_INT_POS_6 MSM_GPIO1_SHADOW_REG(0xA4)
+#define QSD8X50_GPIO_INT_POS_7 MSM_GPIO1_SHADOW_REG(0xA8)
+
+/* same pin map as above, interrupt enable */
+#define QSD8X50_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0xB0)
+#define QSD8X50_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60)
+#define QSD8X50_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0xB4)
+#define QSD8X50_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0xB8)
+#define QSD8X50_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0xBC)
+#define QSD8X50_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xC0)
+#define QSD8X50_GPIO_INT_EN_6 MSM_GPIO1_SHADOW_REG(0xC4)
+#define QSD8X50_GPIO_INT_EN_7 MSM_GPIO1_SHADOW_REG(0xC8)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define QSD8X50_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0xD0)
+#define QSD8X50_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68)
+#define QSD8X50_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0xD4)
+#define QSD8X50_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0xD8)
+#define QSD8X50_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0xDC)
+#define QSD8X50_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xE0)
+#define QSD8X50_GPIO_INT_CLEAR_6 MSM_GPIO1_SHADOW_REG(0xE4)
+#define QSD8X50_GPIO_INT_CLEAR_7 MSM_GPIO1_SHADOW_REG(0xE8)
+
+/* same pin map as above, 1=interrupt pending */
+#define QSD8X50_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xF0)
+#define QSD8X50_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70)
+#define QSD8X50_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xF4)
+#define QSD8X50_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xF8)
+#define QSD8X50_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xFC)
+#define QSD8X50_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0x100)
+#define QSD8X50_GPIO_INT_STATUS_6 MSM_GPIO1_SHADOW_REG(0x104)
+#define QSD8X50_GPIO_INT_STATUS_7 MSM_GPIO1_SHADOW_REG(0x108)
+
+/*
+ * MSM7X30 registers
+ */
+/* output value */
+#define MSM7X30_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
+#define MSM7X30_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 43-16 */
+#define MSM7X30_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-44 */
+#define MSM7X30_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
+#define MSM7X30_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */
+#define MSM7X30_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 133-107 */
+#define MSM7X30_GPIO_OUT_6 MSM_GPIO1_REG(0xC4) /* gpio 150-134 */
+#define MSM7X30_GPIO_OUT_7 MSM_GPIO1_REG(0x214) /* gpio 181-151 */
+
+/* same pin map as above, output enable */
+#define MSM7X30_GPIO_OE_0 MSM_GPIO1_REG(0x10)
+#define MSM7X30_GPIO_OE_1 MSM_GPIO2_REG(0x08)
+#define MSM7X30_GPIO_OE_2 MSM_GPIO1_REG(0x14)
+#define MSM7X30_GPIO_OE_3 MSM_GPIO1_REG(0x18)
+#define MSM7X30_GPIO_OE_4 MSM_GPIO1_REG(0x1C)
+#define MSM7X30_GPIO_OE_5 MSM_GPIO1_REG(0x54)
+#define MSM7X30_GPIO_OE_6 MSM_GPIO1_REG(0xC8)
+#define MSM7X30_GPIO_OE_7 MSM_GPIO1_REG(0x218)
+
+/* same pin map as above, input read */
+#define MSM7X30_GPIO_IN_0 MSM_GPIO1_REG(0x34)
+#define MSM7X30_GPIO_IN_1 MSM_GPIO2_REG(0x20)
+#define MSM7X30_GPIO_IN_2 MSM_GPIO1_REG(0x38)
+#define MSM7X30_GPIO_IN_3 MSM_GPIO1_REG(0x3C)
+#define MSM7X30_GPIO_IN_4 MSM_GPIO1_REG(0x40)
+#define MSM7X30_GPIO_IN_5 MSM_GPIO1_REG(0x44)
+#define MSM7X30_GPIO_IN_6 MSM_GPIO1_REG(0xCC)
+#define MSM7X30_GPIO_IN_7 MSM_GPIO1_REG(0x21C)
+
+/* same pin map as above, 1=edge 0=level interrupt */
+#define MSM7X30_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60)
+#define MSM7X30_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
+#define MSM7X30_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64)
+#define MSM7X30_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68)
+#define MSM7X30_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C)
+#define MSM7X30_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0)
+#define MSM7X30_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0xD0)
+#define MSM7X30_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x240)
+
+/* same pin map as above, 1=positive 0=negative */
+#define MSM7X30_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70)
+#define MSM7X30_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
+#define MSM7X30_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74)
+#define MSM7X30_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78)
+#define MSM7X30_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C)
+#define MSM7X30_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC)
+#define MSM7X30_GPIO_INT_POS_6 MSM_GPIO1_REG(0xD4)
+#define MSM7X30_GPIO_INT_POS_7 MSM_GPIO1_REG(0x228)
+
+/* same pin map as above, interrupt enable */
+#define MSM7X30_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80)
+#define MSM7X30_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
+#define MSM7X30_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84)
+#define MSM7X30_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88)
+#define MSM7X30_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C)
+#define MSM7X30_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8)
+#define MSM7X30_GPIO_INT_EN_6 MSM_GPIO1_REG(0xD8)
+#define MSM7X30_GPIO_INT_EN_7 MSM_GPIO1_REG(0x22C)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define MSM7X30_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90)
+#define MSM7X30_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
+#define MSM7X30_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94)
+#define MSM7X30_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98)
+#define MSM7X30_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C)
+#define MSM7X30_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4)
+#define MSM7X30_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xDC)
+#define MSM7X30_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0x230)
+
+/* same pin map as above, 1=interrupt pending */
+#define MSM7X30_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0)
+#define MSM7X30_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
+#define MSM7X30_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4)
+#define MSM7X30_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8)
+#define MSM7X30_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC)
+#define MSM7X30_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0)
+#define MSM7X30_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0xE0)
+#define MSM7X30_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x234)
+
+#define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0)
+
+#define MSM_GPIO_BANK(soc, bank, first, last) \
+ { \
+ .regs = { \
+ .out = soc##_GPIO_OUT_##bank, \
+ .in = soc##_GPIO_IN_##bank, \
+ .int_status = soc##_GPIO_INT_STATUS_##bank, \
+ .int_clear = soc##_GPIO_INT_CLEAR_##bank, \
+ .int_en = soc##_GPIO_INT_EN_##bank, \
+ .int_edge = soc##_GPIO_INT_EDGE_##bank, \
+ .int_pos = soc##_GPIO_INT_POS_##bank, \
+ .oe = soc##_GPIO_OE_##bank, \
+ }, \
+ .chip = { \
+ .base = (first), \
+ .ngpio = (last) - (first) + 1, \
+ .get = msm_gpio_get, \
+ .set = msm_gpio_set, \
+ .direction_input = msm_gpio_direction_input, \
+ .direction_output = msm_gpio_direction_output, \
+ .to_irq = msm_gpio_to_irq, \
+ .request = msm_gpio_request, \
+ .free = msm_gpio_free, \
+ } \
+ }
+
+#define MSM_GPIO_BROKEN_INT_CLEAR 1
+
+struct msm_gpio_regs {
+ void __iomem *out;
+ void __iomem *in;
+ void __iomem *int_status;
+ void __iomem *int_clear;
+ void __iomem *int_en;
+ void __iomem *int_edge;
+ void __iomem *int_pos;
+ void __iomem *oe;
+};
+
+struct msm_gpio_chip {
+ spinlock_t lock;
+ struct gpio_chip chip;
+ struct msm_gpio_regs regs;
+#if MSM_GPIO_BROKEN_INT_CLEAR
+ unsigned int_status_copy;
+#endif
+ unsigned int both_edge_detect;
+ unsigned int int_enable[2]; /* 0: awake, 1: sleep */
+};
+
+static int msm_gpio_write(struct msm_gpio_chip *msm_chip,
+ unsigned offset, unsigned on)
+{
+ unsigned mask = BIT(offset);
+ unsigned val;
+
+ val = readl(msm_chip->regs.out);
+ if (on)
+ writel(val | mask, msm_chip->regs.out);
+ else
+ writel(val & ~mask, msm_chip->regs.out);
+ return 0;
+}
+
+static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip)
+{
+ int loop_limit = 100;
+ unsigned pol, val, val2, intstat;
+ do {
+ val = readl(msm_chip->regs.in);
+ pol = readl(msm_chip->regs.int_pos);
+ pol = (pol & ~msm_chip->both_edge_detect) |
+ (~val & msm_chip->both_edge_detect);
+ writel(pol, msm_chip->regs.int_pos);
+ intstat = readl(msm_chip->regs.int_status);
+ val2 = readl(msm_chip->regs.in);
+ if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0)
+ return;
+ } while (loop_limit-- > 0);
+ printk(KERN_ERR "msm_gpio_update_both_edge_detect, "
+ "failed to reach stable state %x != %x\n", val, val2);
+}
+
+static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip,
+ unsigned offset)
+{
+ unsigned bit = BIT(offset);
+
+#if MSM_GPIO_BROKEN_INT_CLEAR
+ /* Save interrupts that already triggered before we lose them. */
+ /* Any interrupt that triggers between the read of int_status */
+ /* and the write to int_clear will still be lost though. */
+ msm_chip->int_status_copy |= readl(msm_chip->regs.int_status);
+ msm_chip->int_status_copy &= ~bit;
+#endif
+ writel(bit, msm_chip->regs.int_clear);
+ msm_gpio_update_both_edge_detect(msm_chip);
+ return 0;
+}
+
+static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct msm_gpio_chip *msm_chip;
+ unsigned long irq_flags;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ writel(readl(msm_chip->regs.oe) & ~BIT(offset), msm_chip->regs.oe);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static int
+msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct msm_gpio_chip *msm_chip;
+ unsigned long irq_flags;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ msm_gpio_write(msm_chip, offset, value);
+ writel(readl(msm_chip->regs.oe) | BIT(offset), msm_chip->regs.oe);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct msm_gpio_chip *msm_chip;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ return (readl(msm_chip->regs.in) & (1U << offset)) ? 1 : 0;
+}
+
+static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct msm_gpio_chip *msm_chip;
+ unsigned long irq_flags;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ msm_gpio_write(msm_chip, offset, value);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return MSM_GPIO_TO_INT(chip->base + offset);
+}
+
+#ifdef CONFIG_MSM_GPIOMUX
+static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return msm_gpiomux_get(chip->base + offset);
+}
+
+static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ msm_gpiomux_put(chip->base + offset);
+}
+#else
+#define msm_gpio_request NULL
+#define msm_gpio_free NULL
+#endif
+
+static struct msm_gpio_chip *msm_gpio_chips;
+static int msm_gpio_count;
+
+static struct msm_gpio_chip msm_gpio_chips_msm7x01[] = {
+ MSM_GPIO_BANK(MSM7X00, 0, 0, 15),
+ MSM_GPIO_BANK(MSM7X00, 1, 16, 42),
+ MSM_GPIO_BANK(MSM7X00, 2, 43, 67),
+ MSM_GPIO_BANK(MSM7X00, 3, 68, 94),
+ MSM_GPIO_BANK(MSM7X00, 4, 95, 106),
+ MSM_GPIO_BANK(MSM7X00, 5, 107, 121),
+};
+
+static struct msm_gpio_chip msm_gpio_chips_msm7x30[] = {
+ MSM_GPIO_BANK(MSM7X30, 0, 0, 15),
+ MSM_GPIO_BANK(MSM7X30, 1, 16, 43),
+ MSM_GPIO_BANK(MSM7X30, 2, 44, 67),
+ MSM_GPIO_BANK(MSM7X30, 3, 68, 94),
+ MSM_GPIO_BANK(MSM7X30, 4, 95, 106),
+ MSM_GPIO_BANK(MSM7X30, 5, 107, 133),
+ MSM_GPIO_BANK(MSM7X30, 6, 134, 150),
+ MSM_GPIO_BANK(MSM7X30, 7, 151, 181),
+};
+
+static struct msm_gpio_chip msm_gpio_chips_qsd8x50[] = {
+ MSM_GPIO_BANK(QSD8X50, 0, 0, 15),
+ MSM_GPIO_BANK(QSD8X50, 1, 16, 42),
+ MSM_GPIO_BANK(QSD8X50, 2, 43, 67),
+ MSM_GPIO_BANK(QSD8X50, 3, 68, 94),
+ MSM_GPIO_BANK(QSD8X50, 4, 95, 103),
+ MSM_GPIO_BANK(QSD8X50, 5, 104, 121),
+ MSM_GPIO_BANK(QSD8X50, 6, 122, 152),
+ MSM_GPIO_BANK(QSD8X50, 7, 153, 164),
+};
+
+static void msm_gpio_irq_ack(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ msm_gpio_clear_detect_status(msm_chip,
+ d->irq - gpio_to_irq(msm_chip->chip.base));
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static void msm_gpio_irq_mask(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ /* level triggered interrupts are also latched */
+ if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
+ msm_gpio_clear_detect_status(msm_chip, offset);
+ msm_chip->int_enable[0] &= ~BIT(offset);
+ writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static void msm_gpio_irq_unmask(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ /* level triggered interrupts are also latched */
+ if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
+ msm_gpio_clear_detect_status(msm_chip, offset);
+ msm_chip->int_enable[0] |= BIT(offset);
+ writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+
+ if (on)
+ msm_chip->int_enable[1] |= BIT(offset);
+ else
+ msm_chip->int_enable[1] &= ~BIT(offset);
+
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+ unsigned val, mask = BIT(offset);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ val = readl(msm_chip->regs.int_edge);
+ if (flow_type & IRQ_TYPE_EDGE_BOTH) {
+ writel(val | mask, msm_chip->regs.int_edge);
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ } else {
+ writel(val & ~mask, msm_chip->regs.int_edge);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ }
+ if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+ msm_chip->both_edge_detect |= mask;
+ msm_gpio_update_both_edge_detect(msm_chip);
+ } else {
+ msm_chip->both_edge_detect &= ~mask;
+ val = readl(msm_chip->regs.int_pos);
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
+ writel(val | mask, msm_chip->regs.int_pos);
+ else
+ writel(val & ~mask, msm_chip->regs.int_pos);
+ }
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ int i, j, mask;
+ unsigned val;
+
+ for (i = 0; i < msm_gpio_count; i++) {
+ struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
+ val = readl(msm_chip->regs.int_status);
+ val &= msm_chip->int_enable[0];
+ while (val) {
+ mask = val & -val;
+ j = fls(mask) - 1;
+ /* printk("%s %08x %08x bit %d gpio %d irq %d\n",
+ __func__, v, m, j, msm_chip->chip.start + j,
+ FIRST_GPIO_IRQ + msm_chip->chip.start + j); */
+ val &= ~mask;
+ generic_handle_irq(FIRST_GPIO_IRQ +
+ msm_chip->chip.base + j);
+ }
+ }
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+}
+
+static struct irq_chip msm_gpio_irq_chip = {
+ .name = "msmgpio",
+ .irq_ack = msm_gpio_irq_ack,
+ .irq_mask = msm_gpio_irq_mask,
+ .irq_unmask = msm_gpio_irq_unmask,
+ .irq_set_wake = msm_gpio_irq_set_wake,
+ .irq_set_type = msm_gpio_irq_set_type,
+};
+
+static int __init msm_init_gpio(void)
+{
+ int i, j = 0;
+
+ if (cpu_is_msm7x01()) {
+ msm_gpio_chips = msm_gpio_chips_msm7x01;
+ msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x01);
+ } else if (cpu_is_msm7x30()) {
+ msm_gpio_chips = msm_gpio_chips_msm7x30;
+ msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x30);
+ } else if (cpu_is_qsd8x50()) {
+ msm_gpio_chips = msm_gpio_chips_qsd8x50;
+ msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_qsd8x50);
+ } else {
+ return 0;
+ }
+
+ for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
+ if (i - FIRST_GPIO_IRQ >=
+ msm_gpio_chips[j].chip.base +
+ msm_gpio_chips[j].chip.ngpio)
+ j++;
+ irq_set_chip_data(i, &msm_gpio_chips[j]);
+ irq_set_chip_and_handler(i, &msm_gpio_irq_chip,
+ handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+
+ for (i = 0; i < msm_gpio_count; i++) {
+ spin_lock_init(&msm_gpio_chips[i].lock);
+ writel(0, msm_gpio_chips[i].regs.int_en);
+ gpiochip_add(&msm_gpio_chips[i].chip);
+ }
+
+ irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
+ irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
+ irq_set_irq_wake(INT_GPIO_GROUP1, 1);
+ irq_set_irq_wake(INT_GPIO_GROUP2, 2);
+ return 0;
+}
+
+postcore_initcall(msm_init_gpio);
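
The least obvious piece of this driver is msm_gpio_update_both_edge_detect(): the controller has no native both-edges mode, so the code keeps re-pointing the polarity register at the opposite of each watched pin's current level and re-reads the input to make sure the level did not flip (without latching an interrupt) while it was being reprogrammed. A condensed sketch of that loop; the register handles and both_edge_mask are assumed to be in scope:

    int tries;
    u32 level, level2, pol, latched;

    for (tries = 0; tries < 100; tries++) {
            level = readl(regs_in);

            /* arm the detector for the edge that would leave the current level,
             * i.e. polarity := inverse of what we just read */
            pol = readl(regs_int_pos);
            pol = (pol & ~both_edge_mask) | (~level & both_edge_mask);
            writel(pol, regs_int_pos);

            latched = readl(regs_int_status);
            level2  = readl(regs_in);

            /* stable: no watched pin changed level without latching an IRQ */
            if (!((level ^ level2) & both_edge_mask & ~latched))
                    break;
    }

If the loop runs out of retries the real driver logs an error, since an edge may then have been missed.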
diff --git a/arch/arm/mach-msm/gpio-v2.c b/drivers/gpio/gpio-msm-v2.c
index cc9c4fd7ccc..5cb1227d69c 100644
--- a/arch/arm/mach-msm/gpio-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,8 +30,8 @@
#include <asm/mach/irq.h>
+#include <mach/msm_gpiomux.h>
#include <mach/msm_iomap.h>
-#include "gpiomux.h"
/* Bits of interest in the GPIO_IN_OUT register.
*/
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
new file mode 100644
index 00000000000..79e66c00235
--- /dev/null
+++ b/drivers/gpio/gpio-tps65912.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/mfd/tps65912.h>
+
+struct tps65912_gpio_data {
+ struct tps65912 *tps65912;
+ struct gpio_chip gpio_chip;
+};
+
+static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ int val;
+
+ val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
+
+ if (val & GPIO_STS_MASK)
+ return 1;
+
+ return 0;
+}
+
+static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+
+ if (value)
+ tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK);
+ else
+ tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK);
+}
+
+static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+
+ /* Set the initial value */
+ tps65912_gpio_set(gc, offset, value);
+
+ return tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK);
+}
+
+static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+
+ return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK);
+
+}
+
+static struct gpio_chip template_chip = {
+ .label = "tps65912",
+ .owner = THIS_MODULE,
+ .direction_input = tps65912_gpio_input,
+ .direction_output = tps65912_gpio_output,
+ .get = tps65912_gpio_get,
+ .set = tps65912_gpio_set,
+ .can_sleep = 1,
+ .ngpio = 5,
+ .base = -1,
+};
+
+static int __devinit tps65912_gpio_probe(struct platform_device *pdev)
+{
+ struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65912_board *pdata = tps65912->dev->platform_data;
+ struct tps65912_gpio_data *tps65912_gpio;
+ int ret;
+
+ tps65912_gpio = kzalloc(sizeof(*tps65912_gpio), GFP_KERNEL);
+ if (tps65912_gpio == NULL)
+ return -ENOMEM;
+
+ tps65912_gpio->tps65912 = tps65912;
+ tps65912_gpio->gpio_chip = template_chip;
+ tps65912_gpio->gpio_chip.dev = &pdev->dev;
+ if (pdata && pdata->gpio_base)
+ tps65912_gpio->gpio_chip.base = pdata->gpio_base;
+
+ ret = gpiochip_add(&tps65912_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, tps65912_gpio);
+
+ return ret;
+
+err:
+ kfree(tps65912_gpio);
+ return ret;
+}
+
+static int __devexit tps65912_gpio_remove(struct platform_device *pdev)
+{
+ struct tps65912_gpio_data *tps65912_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&tps65912_gpio->gpio_chip);
+ if (ret == 0)
+ kfree(tps65912_gpio);
+
+ return ret;
+}
+
+static struct platform_driver tps65912_gpio_driver = {
+ .driver = {
+ .name = "tps65912-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_gpio_probe,
+ .remove = __devexit_p(tps65912_gpio_remove),
+};
+
+static int __init tps65912_gpio_init(void)
+{
+ return platform_driver_register(&tps65912_gpio_driver);
+}
+subsys_initcall(tps65912_gpio_init);
+
+static void __exit tps65912_gpio_exit(void)
+{
+ platform_driver_unregister(&tps65912_gpio_driver);
+}
+module_exit(tps65912_gpio_exit);
+
+MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("GPIO interface for TPS65912 PMICs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65912-gpio");
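
Because every register access goes over I2C or SPI, the gpio_chip is registered with can_sleep set, so consumers have to stay on the *_cansleep accessors and out of atomic context. A hedged usage sketch from board or driver code, assuming the chip was registered with a known pdata->gpio_base:

    #define BOARD_TPS65912_GPIO_BASE 200    /* assumed pdata->gpio_base */

    int err;

    err = gpio_request_one(BOARD_TPS65912_GPIO_BASE + 0,
                           GPIOF_OUT_INIT_LOW, "codec-reset");
    if (!err)
            /* may sleep: the write is carried out over the PMIC bus */
            gpio_set_value_cansleep(BOARD_TPS65912_GPIO_BASE + 0, 1);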
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9d8c892d07c..9d2668a5087 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -90,7 +90,6 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
struct drm_device *dev = minor->dev;
struct dentry *ent;
struct drm_info_node *tmp;
- char name[64];
int i, ret;
for (i = 0; i < count; i++) {
@@ -108,6 +107,9 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
root, tmp, &drm_debugfs_fops);
if (!ent) {
+ char name[64];
+ strncpy(name, root->d_name.name,
+ min(root->d_name.len, 64U));
DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
name, files[i].name);
kfree(tmp);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 756af4d7ec7..7425e5c9bd7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -127,6 +127,23 @@ static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
+ /*
+ * Sanity check the header of the base EDID block. Return 8 if the header
+ * is perfect, down to 0 if it's totally wrong.
+ */
+int drm_edid_header_is_valid(const u8 *raw_edid)
+{
+ int i, score = 0;
+
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ return score;
+}
+EXPORT_SYMBOL(drm_edid_header_is_valid);
+
+
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
@@ -139,12 +156,7 @@ drm_edid_block_valid(u8 *raw_edid)
struct edid *edid = (struct edid *)raw_edid;
if (raw_edid[0] == 0x00) {
- int score = 0;
-
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
-
+ int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= 6) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
@@ -1439,6 +1451,8 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
static void drm_add_display_info(struct edid *edid,
struct drm_display_info *info)
{
+ u8 *edid_ext;
+
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
@@ -1483,6 +1497,13 @@ static void drm_add_display_info(struct edid *edid,
info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
+
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return;
+
+ info->cea_rev = edid_ext[1];
}
/**
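The new drm_edid_header_is_valid() helper above simply counts how many of the first eight bytes match the fixed EDID signature; callers treat 8 as a perfect header and 6 or 7 as repairable (the radeon DDC probe later in this series accepts a score of at least 6). A standalone sketch of that scoring idea, with names that are not the kernel's:

/* Illustrative reimplementation of the header scoring; not the kernel function. */
static const unsigned char edid_signature[8] = {
    0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

static int edid_header_score(const unsigned char *raw_edid)
{
    int i, score = 0;

    for (i = 0; i < 8; i++)
        if (raw_edid[i] == edid_signature[i])
            score++;

    return score;    /* 8 = perfect header, >= 6 = fixable, lower = reject */
}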
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2022a5c966b..3830e9e478c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -291,11 +291,14 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
if (!dev->irq_enabled)
return;
- if (state)
- dev->driver->irq_uninstall(dev);
- else {
- dev->driver->irq_preinstall(dev);
- dev->driver->irq_postinstall(dev);
+ if (state) {
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+ } else {
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_postinstall)
+ dev->driver->irq_postinstall(dev);
}
}
@@ -338,7 +341,8 @@ int drm_irq_install(struct drm_device *dev)
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
/* Before installing handler */
- dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
/* Install handler */
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
@@ -363,11 +367,16 @@ int drm_irq_install(struct drm_device *dev)
vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
/* After installing handler */
- ret = dev->driver->irq_postinstall(dev);
+ if (dev->driver->irq_postinstall)
+ ret = dev->driver->irq_postinstall(dev);
+
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = 0;
mutex_unlock(&dev->struct_mutex);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+ free_irq(drm_dev_to_irq(dev), dev);
}
return ret;
@@ -413,7 +422,8 @@ int drm_irq_uninstall(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
- dev->driver->irq_uninstall(dev);
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
free_irq(drm_dev_to_irq(dev), dev);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e2662497d50..a8ab6263e0d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1338,6 +1338,155 @@ static const struct file_operations i915_wedged_fops = {
.llseek = default_llseek,
};
+static int
+i915_max_freq_open(struct inode *inode,
+ struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+i915_max_freq_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof (buf),
+ "max freq: %d\n", dev_priv->max_delay * 50);
+
+ if (len > sizeof (buf))
+ len = sizeof (buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_max_freq_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof (buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
+
+ /*
+ * Turbo will still be enabled, but won't go above the set value.
+ */
+ dev_priv->max_delay = val / 50;
+
+ gen6_set_rps(dev, val / 50);
+
+ return cnt;
+}
+
+static const struct file_operations i915_max_freq_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_max_freq_open,
+ .read = i915_max_freq_read,
+ .write = i915_max_freq_write,
+ .llseek = default_llseek,
+};
+
+static int
+i915_cache_sharing_open(struct inode *inode,
+ struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+i915_cache_sharing_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ u32 snpcr;
+ int len;
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+
+ len = snprintf(buf, sizeof (buf),
+ "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
+ GEN6_MBC_SNPCR_SHIFT);
+
+ if (len > sizeof (buf))
+ len = sizeof (buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_cache_sharing_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ u32 snpcr;
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof (buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ if (val < 0 || val > 3)
+ return -EINVAL;
+
+ DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
+
+ /* Update the cache sharing policy here as well */
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ return cnt;
+}
+
+static const struct file_operations i915_cache_sharing_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_cache_sharing_open,
+ .read = i915_cache_sharing_read,
+ .write = i915_cache_sharing_write,
+ .llseek = default_llseek,
+};
+
/* As the drm_debugfs_init() routines are called before dev->dev_private is
* allocated we need to hook into the minor for release. */
static int
@@ -1437,6 +1586,36 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
+static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_max_freq",
+ S_IRUGO | S_IWUSR,
+ root, dev,
+ &i915_max_freq_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
+}
+
+static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_cache_sharing",
+ S_IRUGO | S_IWUSR,
+ root, dev,
+ &i915_cache_sharing_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
+}
+
static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@@ -1490,6 +1669,12 @@ int i915_debugfs_init(struct drm_minor *minor)
ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
+ ret = i915_max_freq_create(minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+ ret = i915_cache_sharing_create(minor->debugfs_root, minor);
+ if (ret)
+ return ret;
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
@@ -1504,6 +1689,10 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
+ 1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
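The two new i915 debugfs files follow the same pattern as the existing i915_wedged node: stash inode->i_private in ->private_data at open time, format into a small stack buffer for reads via simple_read_from_buffer(), and parse a short, NUL-terminated copy of the user buffer on writes. A condensed sketch of that pattern with placeholder names (example_*), not the driver's own symbols:

/* Assumes <linux/debugfs.h>, <linux/fs.h>, <linux/module.h>, <linux/uaccess.h>. */
static unsigned long example_value;    /* stands in for the driver state being exposed */

static int example_open(struct inode *inode, struct file *filp)
{
    filp->private_data = inode->i_private;
    return 0;
}

static ssize_t example_read(struct file *filp, char __user *ubuf,
                            size_t max, loff_t *ppos)
{
    char buf[32];
    int len = snprintf(buf, sizeof(buf), "%lu\n", example_value);

    return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t example_write(struct file *filp, const char __user *ubuf,
                             size_t cnt, loff_t *ppos)
{
    char buf[20];

    if (cnt >= sizeof(buf))
        return -EINVAL;
    if (copy_from_user(buf, ubuf, cnt))
        return -EFAULT;
    buf[cnt] = '\0';

    example_value = simple_strtoul(buf, NULL, 0);
    return cnt;
}

static const struct file_operations example_fops = {
    .owner  = THIS_MODULE,
    .open   = example_open,
    .read   = example_read,
    .write  = example_write,
    .llseek = default_llseek,
};

The node itself would then be created with debugfs_create_file("example", S_IRUGO | S_IWUSR, root, dev, &example_fops), the same way i915_max_freq_create() does above.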
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 12712824a6d..8a3942c4f09 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -61,7 +61,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
@@ -71,10 +70,9 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr =
- (void __force __iomem *)dev_priv->status_page_dmah->vaddr;
- memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
+ memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
+ 0, PAGE_SIZE);
i915_write_hws_pga(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6867e193d85..feb4f164fd1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -544,6 +544,7 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_M1;
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
+ u32 savePCH_PORT_HOTPLUG;
struct {
/** Bridge to intel-gtt-ko */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d1cd8b89f47..a546a71fb06 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3112,7 +3112,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj);
- if (ret)
+ if (ret == -ERESTARTSYS)
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 23d1ae67d27..02f96fd0d52 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -306,12 +306,15 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
+ mutex_unlock(&mode_config->mutex);
+
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 02db299f621..d1331f771e2 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,14 @@
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
+#define GEN6_MBC_SNPCR_SHIFT 21
+#define GEN6_MBC_SNPCR_MASK (3<<21)
+#define GEN6_MBC_SNPCR_MAX (0<<21)
+#define GEN6_MBC_SNPCR_MED (1<<21)
+#define GEN6_MBC_SNPCR_LOW (2<<21)
+#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+
#define GEN6_GDRST 0x941c
#define GEN6_GRDOM_FULL (1 << 0)
#define GEN6_GRDOM_RENDER (1 << 1)
@@ -1506,6 +1514,7 @@
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_SELECT_MASK (3 << 19)
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
@@ -2084,9 +2093,6 @@
#define DP_PIPEB_SELECT (1 << 30)
#define DP_PIPE_MASK (1 << 30)
-#define DP_PIPE_ENABLED(V, P) \
- (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
-
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
#define DP_LINK_TRAIN_PAT_2 (1 << 28)
@@ -3024,6 +3030,20 @@
#define _TRANSA_DP_LINK_M2 0xe0048
#define _TRANSA_DP_LINK_N2 0xe004c
+/* Per-transcoder DIP controls */
+
+#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_GCP_A 0xe0210
+
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define _VIDEO_DIP_GCP_B 0xe1210
+
+#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
@@ -3076,6 +3096,16 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
+#define _TRANSA_CHICKEN2 0xf0064
+#define _TRANSB_CHICKEN2 0xf1064
+#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+
+#define SOUTH_CHICKEN1 0xc2000
+#define FDIA_PHASE_SYNC_SHIFT_OVR 19
+#define FDIA_PHASE_SYNC_SHIFT_EN 18
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define SOUTH_CHICKEN2 0xc2004
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 285758603ac..87677d60d0d 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -812,6 +812,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
dev_priv->saveMCHBAR_RENDER_STANDBY =
I915_READ(RSTDBYCTL);
+ dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
} else {
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
@@ -863,6 +864,7 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(GTIMR, dev_priv->saveGTIMR);
I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
} else {
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 393a39922e5..35364e68a09 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -980,11 +980,29 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
+static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe,
+ int reg, u32 port_sel, u32 val)
+{
+ if ((val & DP_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
+ u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
+ if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
+ return false;
+ } else {
+ if ((val & DP_PIPE_MASK) != (pipe << 30))
+ return false;
+ }
+ return true;
+}
+
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
+ enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
- WARN(DP_PIPE_ENABLED(val, pipe),
+ WARN(dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
@@ -1004,9 +1022,9 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
reg = PCH_ADPA;
val = I915_READ(reg);
@@ -1276,6 +1294,17 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+}
+
/**
* intel_enable_plane - enable a display plane on a given pipe
* @dev_priv: i915 private structure
@@ -1299,20 +1328,10 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv,
return;
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
-/*
- * Plane regs are double buffered, going from enabled->disabled needs a
- * trigger in order to latch. The display address reg provides this.
- */
-static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
- enum plane plane)
-{
- u32 reg = DSPADDR(plane);
- I915_WRITE(reg, I915_READ(reg));
-}
-
/**
* intel_disable_plane - disable a display plane
* @dev_priv: i915 private structure
@@ -1338,19 +1357,24 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
}
static void disable_pch_dp(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
+ enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
- if (DP_PIPE_ENABLED(val, pipe))
+ if (dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val)) {
+ DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
I915_WRITE(reg, val & ~DP_PORT_EN);
+ }
}
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- if (HDMI_PIPE_ENABLED(val, pipe))
+ if (HDMI_PIPE_ENABLED(val, pipe)) {
+ DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
+ reg, pipe);
I915_WRITE(reg, val & ~PORT_ENABLE);
+ }
}
/* Disable any ports connected to this transcoder */
@@ -1362,9 +1386,9 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
val = I915_READ(PCH_PP_CONTROL);
I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
- disable_pch_dp(dev_priv, pipe, PCH_DP_B);
- disable_pch_dp(dev_priv, pipe, PCH_DP_C);
- disable_pch_dp(dev_priv, pipe, PCH_DP_D);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
reg = PCH_ADPA;
val = I915_READ(reg);
@@ -2096,7 +2120,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
+ DRM_ERROR("No FB bound\n");
return 0;
}
@@ -2105,6 +2129,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
case 1:
break;
default:
+ DRM_ERROR("no plane for crtc\n");
return -EINVAL;
}
@@ -2114,6 +2139,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("pin & fence failed\n");
return ret;
}
@@ -2142,6 +2168,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("failed to update base address\n");
return ret;
}
@@ -2248,6 +2275,18 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
+static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 flags = I915_READ(SOUTH_CHICKEN1);
+
+ flags |= FDI_PHASE_SYNC_OVR(pipe);
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
+ flags |= FDI_PHASE_SYNC_EN(pipe);
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -2398,6 +2437,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
+ if (HAS_PCH_CPT(dev))
+ cpt_phase_pointer_enable(dev, pipe);
+
for (i = 0; i < 4; i++ ) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2514,6 +2556,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
+ if (HAS_PCH_CPT(dev))
+ cpt_phase_pointer_enable(dev, pipe);
+
for (i = 0; i < 4; i++ ) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2623,6 +2668,17 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
}
}
+static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 flags = I915_READ(SOUTH_CHICKEN1);
+
+ flags &= ~(FDI_PHASE_SYNC_EN(pipe));
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
+ flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
+ POSTING_READ(SOUTH_CHICKEN1);
+}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2652,6 +2708,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_CHICKEN(pipe),
I915_READ(FDI_RX_CHICKEN(pipe) &
~FDI_RX_PHASE_SYNC_POINTER_EN));
+ } else if (HAS_PCH_CPT(dev)) {
+ cpt_phase_pointer_disable(dev, pipe);
}
/* still set train pattern 1 */
@@ -2862,14 +2920,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
+ /*
+ * On ILK+ the LUT must be loaded before the pipe is running, but with
+ * clocks enabled

+ */
+ intel_crtc_load_lut(crtc);
+
intel_enable_pipe(dev_priv, pipe, is_pch_port);
intel_enable_plane(dev_priv, plane, pipe);
if (is_pch_port)
ironlake_pch_enable(crtc);
- intel_crtc_load_lut(crtc);
-
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
@@ -4538,7 +4600,9 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
if (connector->encoder != encoder)
continue;
- if (connector->display_info.bpc < display_bpc) {
+ /* Don't use an invalid EDID bpc value */
+ if (connector->display_info.bpc &&
+ connector->display_info.bpc < display_bpc) {
DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
display_bpc = connector->display_info.bpc;
}
@@ -5153,7 +5217,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
temp |= PIPE_12BPC;
break;
default:
- WARN(1, "intel_choose_pipe_bpp returned invalid value\n");
+ WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
+ pipe_bpp);
temp |= PIPE_8BPC;
pipe_bpp = 24;
break;
@@ -5238,7 +5303,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else if (is_sdvo && is_tv)
factor = 20;
- if (clock.m1 < factor * clock.n)
+ if (clock.m < factor * clock.n)
fp |= FP_CB_TUNE;
dpll = 0;
@@ -5516,6 +5581,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_post_modeset(dev, pipe);
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+
return ret;
}
@@ -7714,10 +7781,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
- for_each_pipe(pipe)
+ for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
@@ -7734,10 +7803,12 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
- for_each_pipe(pipe)
+ for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -7820,6 +7891,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -7829,6 +7901,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
static void ironlake_teardown_rc6(struct drm_device *dev)
@@ -8178,6 +8253,9 @@ struct intel_quirk intel_quirks[] = {
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f797fb58ba9..0feae908bb3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -50,9 +50,10 @@ struct intel_dp {
bool has_audio;
int force_audio;
uint32_t color_range;
+ int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
- uint8_t dpcd[4];
+ uint8_t dpcd[8];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
@@ -316,9 +317,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
else
precharge = 5;
- if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
- DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
- I915_READ(ch_ctl));
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (try == 3) {
+ WARN(1, "dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
return -EBUSY;
}
@@ -770,6 +779,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
+ intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
/*
* Check for DPCD version > 1.1 and enhanced framing support
@@ -1011,6 +1021,8 @@ static void intel_dp_commit(struct drm_encoder *encoder)
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
+
+ intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
}
static void
@@ -1045,6 +1057,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
}
+ intel_dp->dpms_mode = mode;
}
/*
@@ -1334,10 +1347,16 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
u32 reg;
uint32_t DP = intel_dp->DP;
- /* Enable output, wait for it to become active */
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ /*
+ * On CPT we have to enable the port in training pattern 1, which
+ * will happen below in intel_dp_set_link_train. Otherwise, enable
+ * the port and wait for it to become active.
+ */
+ if (!HAS_PCH_CPT(dev)) {
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1370,7 +1389,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_1))
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE))
break;
/* Set training pattern 1 */
@@ -1445,7 +1465,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_2))
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE))
break;
udelay(400);
@@ -1559,6 +1580,18 @@ intel_dp_link_down(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
}
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+ if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd)) &&
+ (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
+ return true;
+ }
+
+ return false;
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -1571,36 +1604,44 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
- int ret;
+ if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
+ return;
if (!intel_dp->base.base.crtc)
return;
+ /* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
- /* Try to read receiver status if the link appears to be up */
- ret = intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd));
- if (ret != sizeof(intel_dp->dpcd)) {
+ /* Now read the DPCD to see if it's actually running */
+ if (!intel_dp_get_dpcd(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
if (!intel_channel_eq_ok(intel_dp)) {
+ DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+ drm_get_encoder_name(&intel_dp->base.base));
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
}
static enum drm_connector_status
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+{
+ if (intel_dp_get_dpcd(intel_dp))
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
enum drm_connector_status status;
- bool ret;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
@@ -1610,15 +1651,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
return status;
}
- status = connector_status_disconnected;
- ret = intel_dp_aux_native_read_retry(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd));
- if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
- status = connector_status_connected;
- DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
- intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
- return status;
+ return intel_dp_detect_dpcd(intel_dp);
}
static enum drm_connector_status
@@ -1626,7 +1659,6 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum drm_connector_status status;
uint32_t temp, bit;
switch (intel_dp->output_reg) {
@@ -1648,15 +1680,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
if ((temp & bit) == 0)
return connector_status_disconnected;
- status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
- {
- if (intel_dp->dpcd[DP_DPCD_REV] != 0)
- status = connector_status_connected;
- }
-
- return status;
+ return intel_dp_detect_dpcd(intel_dp);
}
/**
@@ -1679,6 +1703,12 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = ironlake_dp_detect(intel_dp);
else
status = g4x_dp_detect(intel_dp);
+
+ DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
+ intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
+ intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
+ intel_dp->dpcd[6], intel_dp->dpcd[7]);
+
if (status != connector_status_connected)
return status;
@@ -1924,6 +1954,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
return;
intel_dp->output_reg = output_reg;
+ intel_dp->dpms_mode = -1;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
@@ -2000,7 +2031,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
- int ret;
+ bool ret;
u32 pp_on, pp_div;
pp_on = I915_READ(PCH_PP_ON_DELAYS);
@@ -2013,11 +2044,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
ironlake_edp_panel_vdd_on(intel_dp);
- ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
- intel_dp->dpcd,
- sizeof(intel_dp->dpcd));
+ ret = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp);
- if (ret == sizeof(intel_dp->dpcd)) {
+ if (ret) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake =
intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 6e990f9760e..7b330e76a43 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -178,10 +178,28 @@ struct intel_crtc {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define DIP_HEADER_SIZE 5
+
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
+#define DIP_TYPE_SPD 0x3
+#define DIP_VERSION_SPD 0x1
+#define DIP_LEN_SPD 25
+#define DIP_SPD_UNKNOWN 0
+#define DIP_SPD_DSTB 0x1
+#define DIP_SPD_DVDP 0x2
+#define DIP_SPD_DVHS 0x3
+#define DIP_SPD_HDDVR 0x4
+#define DIP_SPD_DVC 0x5
+#define DIP_SPD_DSC 0x6
+#define DIP_SPD_VCD 0x7
+#define DIP_SPD_GAME 0x8
+#define DIP_SPD_PC 0x9
+#define DIP_SPD_BD 0xa
+#define DIP_SPD_SCD 0xb
+
struct dip_infoframe {
uint8_t type; /* HB0 */
uint8_t ver; /* HB1 */
@@ -206,6 +224,11 @@ struct dip_infoframe {
uint16_t left_bar_end;
uint16_t right_bar_start;
} avi;
+ struct {
+ uint8_t vn[8];
+ uint8_t pd[16];
+ uint8_t sdi;
+ } spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1ed8e690391..226ba830f38 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -45,6 +45,8 @@ struct intel_hdmi {
bool has_hdmi_sink;
bool has_audio;
int force_audio;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ struct dip_infoframe *frame);
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
@@ -58,37 +60,70 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
struct intel_hdmi, base);
}
-void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
+void intel_dip_infoframe_csum(struct dip_infoframe *frame)
{
- uint8_t *data = (uint8_t *)avi_if;
+ uint8_t *data = (uint8_t *)frame;
uint8_t sum = 0;
unsigned i;
- avi_if->checksum = 0;
- avi_if->ecc = 0;
+ frame->checksum = 0;
+ frame->ecc = 0;
- for (i = 0; i < sizeof(*avi_if); i++)
+ /* Header isn't part of the checksum */
+ for (i = 5; i < frame->len; i++)
sum += data[i];
- avi_if->checksum = 0x100 - sum;
+ frame->checksum = 0x100 - sum;
}
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+static u32 intel_infoframe_index(struct dip_infoframe *frame)
{
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
- uint32_t *data = (uint32_t *)&avi_if;
+ u32 flags = 0;
+
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ flags |= VIDEO_DIP_SELECT_AVI;
+ break;
+ case DIP_TYPE_SPD:
+ flags |= VIDEO_DIP_SELECT_SPD;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ break;
+ }
+
+ return flags;
+}
+
+static u32 intel_infoframe_flags(struct dip_infoframe *frame)
+{
+ u32 flags = 0;
+
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
+ break;
+ case DIP_TYPE_SPD:
+ flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ break;
+ }
+
+ return flags;
+}
+
+static void i9xx_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 port;
- unsigned i;
+ u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
- if (!intel_hdmi->has_hdmi_sink)
- return;
/* XXX first guess at handling video port, is this correct? */
if (intel_hdmi->sdvox_reg == SDVOB)
@@ -98,18 +133,87 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
else
return;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
- VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
+ flags = intel_infoframe_index(frame);
+
+ val &= ~VIDEO_DIP_SELECT_MASK;
- intel_dip_infoframe_csum(&avi_if);
- for (i = 0; i < sizeof(avi_if); i += 4) {
+ I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
+
+ for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
- VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
- VIDEO_DIP_ENABLE_AVI);
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+}
+
+static void ironlake_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 flags, val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ flags = intel_infoframe_index(frame);
+
+ val &= ~VIDEO_DIP_SELECT_MASK;
+
+ I915_WRITE(reg, val | flags);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+}
+static void intel_set_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (!intel_hdmi->has_hdmi_sink)
+ return;
+
+ intel_dip_infoframe_csum(frame);
+ intel_hdmi->write_infoframe(encoder, frame);
+}
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+
+ intel_set_infoframe(encoder, &avi_if);
+}
+
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe spd_if;
+
+ memset(&spd_if, 0, sizeof(spd_if));
+ spd_if.type = DIP_TYPE_SPD;
+ spd_if.ver = DIP_VERSION_SPD;
+ spd_if.len = DIP_LEN_SPD;
+ strcpy(spd_if.body.spd.vn, "Intel");
+ strcpy(spd_if.body.spd.pd, "Integrated gfx");
+ spd_if.body.spd.sdi = DIP_SPD_PC;
+
+ intel_set_infoframe(encoder, &spd_if);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -156,6 +260,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
POSTING_READ(intel_hdmi->sdvox_reg);
intel_hdmi_set_avi_infoframe(encoder);
+ intel_hdmi_set_spd_infoframe(encoder);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -433,6 +538,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_hdmi->sdvox_reg = sdvox_reg;
+ if (!HAS_PCH_SPLIT(dev))
+ intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ else
+ intel_hdmi->write_infoframe = ironlake_write_infoframe;
+
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_hdmi_add_properties(intel_hdmi, connector);
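For reference, the CEA-861/HDMI rule behind intel_dip_infoframe_csum() is that every byte of the infoframe — the three-byte header, the checksum byte itself and the payload — must sum to zero modulo 256. The sketch below states that general rule only; it deliberately does not mirror the i915 struct layout, where the hardware DIP header bytes are excluded from the sum, as the comment in the hunk notes:

/* Generic CEA-861 infoframe checksum: choose the checksum byte so that the
 * 3 header bytes + checksum + payload sum to 0 (mod 256). Illustrative only. */
static unsigned char cea_infoframe_checksum(const unsigned char header[3],
                                            const unsigned char *payload,
                                            int payload_len)
{
    unsigned int sum = 0;
    int i;

    for (i = 0; i < 3; i++)
        sum += header[i];
    for (i = 0; i < payload_len; i++)
        sum += payload[i];

    return (unsigned char)(0x100 - (sum & 0xff));
}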
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b28f7bd9f88..2e8ddfcba40 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -690,6 +690,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell OptiPlex FX170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "AOpen Mini PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a06ff07a4d3..05f500cd9c2 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -83,11 +83,15 @@ intel_pch_panel_fitting(struct drm_device *dev,
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
if (scaled_width > scaled_height) { /* pillar */
width = scaled_height / mode->vdisplay;
+ if (width & 1)
+ width++;
x = (adjusted_mode->hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
height = scaled_width / mode->hdisplay;
+ if (height & 1)
+ height++;
y = (adjusted_mode->vdisplay - height + 1) / 2;
x = 0;
width = adjusted_mode->hdisplay;
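A worked example of the pillarbox path above, with illustrative numbers: a 1280x720 source mode on a 1366x768 panel scales to a width of 1280*768/720 = 1365, which is odd; the new check bumps it to 1366 before the borders are centred. As a sketch:

/* Illustrative arithmetic for the pillarbox case; the numbers are made up. */
static void example_pillarbox(void)
{
    int mode_h = 1280, mode_v = 720;    /* source mode */
    int adj_h = 1366, adj_v = 768;      /* panel's native timing */
    int width, x;

    width = (mode_h * adj_v) / mode_v;  /* 983040 / 720 = 1365 */
    if (width & 1)                      /* avoid an odd fitter width */
        width++;                        /* 1366 */

    x = (adj_h - width + 1) / 2;        /* 0: symmetric black borders */
    (void)x;
}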
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e9615685a39..47b9b277703 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1321,6 +1321,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->get_seqno = pc_render_get_seqno;
}
+ if (!I915_NEED_GFX_HWS(dev))
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 3896ef81110..9f363e0c4b6 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -Iinclude/drm
hostprogs-y := mkregtable
+clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
quiet_cmd_mkregtable = MKREGTABLE $@
cmd_mkregtable = $(obj)/mkregtable $< > $@
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index ebdb0fdb834..e88c64417a8 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1245,6 +1245,9 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
char name[512];
int i;
+ if (!ctx)
+ return NULL;
+
ctx->card = card;
ctx->bios = bios;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 189e86522b5..a134790903d 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -428,7 +428,7 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
i = (reg >> 7);
- if (i > last_reg) {
+ if (i >= last_reg) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index db8ef1905d5..cf83aa05a68 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -915,12 +915,11 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
{
struct r600_cs_track *track = (struct r600_cs_track *)p->track;
struct radeon_cs_reloc *reloc;
- u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
u32 m, i, tmp, *ib;
int r;
i = (reg >> 7);
- if (i > last_reg) {
+ if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index a74217cd192..e0138b674ac 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2557,6 +2557,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
u16 offset, misc, misc2 = 0;
u8 rev, blocks, tmp;
int state_index = 0;
+ struct radeon_i2c_bus_rec i2c_bus;
rdev->pm.default_power_state_index = -1;
@@ -2575,7 +2576,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
if (offset) {
u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
- struct radeon_i2c_bus_rec i2c_bus;
rev = RBIOS8(offset);
@@ -2617,6 +2617,25 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
+ } else {
+ /* boards with a thermal chip, but no overdrive table */
+
+ /* Asus 9600xt has an f75375 on the monid bus */
+ if ((dev->pdev->device == 0x4152) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0xc002)) {
+ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = "f75375";
+ info.addr = 0x28;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ name, info.addr);
+ }
+ }
}
if (rdev->flags & RADEON_IS_MOBILITY) {
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 9792d4ffdc8..6d6b5f16bc0 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -430,6 +430,45 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
return 0;
}
+/*
+ * Some integrated ATI Radeon chipset implementations (e. g.
+ * Asus M2A-VM HDMI) may indicate the availability of a DDC,
+ * even when there's no monitor connected. For these connectors
+ * following DDC probe extension will be applied: check also for the
+ * availability of EDID with at least a correct EDID header. Only then,
+ * DDC is assumed to be available. This prevents drm_get_edid() and
+ * drm_edid_block_valid() from periodically dumping data and kernel
+ * errors into the logs and onto the terminal.
+ */
+static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
+ uint32_t supported_device,
+ int connector_type)
+{
+ /* The Asus M2A-VM HDMI board sends data on the i2c bus even
+ * if the HDMI add-on card is not plugged in or HDMI is disabled in
+ * the BIOS. Valid DDC can only be assumed if a valid EDID header
+ * can also be retrieved via the i2c bus during the DDC probe */
+ if ((dev->pdev->device == 0x791e) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x826d)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+ /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus
+ * for a DVI connector that is not implemented */
+ if ((dev->pdev->device == 0x796e) &&
+ (dev->pdev->subsystem_vendor == 0x1019) &&
+ (dev->pdev->subsystem_device == 0x2615)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_DVID) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+
+ /* Default: no EDID header probe required for DDC probing */
+ return false;
+}
+
static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
@@ -661,7 +700,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -833,7 +873,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
bool dret = false;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -1251,7 +1292,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- if (radeon_ddc_probe(radeon_connector))
+ if (radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe))
ret = connector_status_connected;
}
}
@@ -1406,6 +1448,9 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
+ radeon_connector->requires_extended_probe =
+ radeon_connector_needs_extended_probe(rdev, supported_device,
+ connector_type);
radeon_connector->router = *router;
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
@@ -1752,6 +1797,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
+ radeon_connector->requires_extended_probe =
+ radeon_connector_needs_extended_probe(rdev, supported_device,
+ connector_type);
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7cfaa7e2f3b..440e6ecccc4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -704,8 +704,9 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->gpu_lockup = false;
rdev->accel_working = false;
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
- radeon_family_name[rdev->family], pdev->vendor, pdev->device);
+ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
+ radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
/* mutex initialization are all done here so we
* can recall function without having locking issues */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 28f4655905b..1a858944e4f 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -751,8 +751,17 @@ static int radeon_ddc_dump(struct drm_connector *connector)
if (!radeon_connector->ddc_bus)
return -1;
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
+ /* Log EDID retrieval status here. This is of particular interest for
+ * connectors with the requires_extended_probe flag set, since that flag
+ * prevents radeon_dvi_detect() from fetching an EDID on this connector
+ * as long as no valid EDID header is found */
if (edid) {
+ DRM_INFO("Radeon display connector %s: Found valid EDID",
+ drm_get_connector_name(connector));
kfree(edid);
+ } else {
+ DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID",
+ drm_get_connector_name(connector));
}
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 85f033f19a8..e71d2ed7fa1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -50,8 +50,8 @@
* 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
* 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
* 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
- * 2.10.0 - fusion 2D tiling, initial compute support for the CS checker
- * 2.11.0 - backend map
+ * 2.10.0 - fusion 2D tiling
+ * 2.11.0 - backend map, initial compute support for the CS checker
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 11
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 781196db792..6c111c1fa3f 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -32,17 +32,17 @@
* radeon_ddc_probe
*
*/
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe)
{
- u8 out_buf[] = { 0x0, 0x0};
- u8 buf[2];
+ u8 out = 0x0;
+ u8 buf[8];
int ret;
struct i2c_msg msgs[] = {
{
.addr = 0x50,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &out,
},
{
.addr = 0x50,
@@ -52,15 +52,31 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
};
+ /* Read 8 bytes from i2c for extended probe of EDID header */
+ if (requires_extended_probe)
+ msgs[1].len = 8;
+
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
- if (ret == 2)
- return true;
-
- return false;
+ if (ret != 2)
+ /* Couldn't find an accessible DDC on this connector */
+ return false;
+ if (requires_extended_probe) {
+ /* Probe also for valid EDID header
+ * EDID header starts with:
+ * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+ * Only the first 6 bytes must be valid as
+ * drm_edid_block_valid() can fix the last 2 bytes */
+ if (drm_edid_header_is_valid(buf) < 6) {
+ /* Couldn't find an accessible EDID on this
+ * connector */
+ return false;
+ }
+ }
+ return true;
}
/* bit banging i2c */
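The reworked probe above is a standard two-message i2c_transfer(): a one-byte write of offset 0 to address 0x50, followed by a read of one byte (or eight bytes for the extended probe), after which the buffer is fed to drm_edid_header_is_valid() and a score of at least 6 is required. A hedged sketch of that transaction, independent of the radeon structures (assumes <linux/i2c.h> and the DRM EDID declarations):

/* Sketch of the DDC probe transaction; 'adapter' is any struct i2c_adapter. */
static bool example_ddc_probe(struct i2c_adapter *adapter, bool extended)
{
    u8 out = 0x0;
    u8 buf[8];
    struct i2c_msg msgs[] = {
        { .addr = 0x50, .flags = 0,        .len = 1, .buf = &out },
        { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = buf  },
    };

    if (extended)
        msgs[1].len = 8;    /* enough bytes to score the EDID header */

    if (i2c_transfer(adapter, msgs, 2) != 2)
        return false;       /* no DDC at all */

    if (extended && drm_edid_header_is_valid(buf) < 6)
        return false;       /* DDC answers, but no plausible EDID */

    return true;
}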
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6df4e3cec0c..d09031c03e2 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -438,6 +438,9 @@ struct radeon_connector {
struct radeon_i2c_chan *ddc_bus;
/* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
+ /* for some Radeon chip families we apply an additional EDID header
+ check as part of the DDC probe */
+ bool requires_extended_probe;
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
@@ -514,7 +517,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector,
+ bool requires_extended_probe);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 3be60da5212..67cbcfa3512 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -141,6 +141,8 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
}
+ if (hwif->index > 0)
+ pci_dev_put(dev);
}
static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 542603b394e..962693b10a1 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/ata_platform.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
static void __devinit plat_ide_setup_ports(struct ide_hw *hw,
@@ -95,7 +96,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
hw.dev = &pdev->dev;
- d.irq_flags = res_irq->flags;
+ d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ d.irq_flags |= IRQF_SHARED;
+
if (mmio)
d.host_flags |= IDE_HFLAG_MMIO;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index ce281d15227..67df91af842 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -483,7 +483,7 @@ static int gpio_keys_get_devtree_pdata(struct device *dev,
buttons = kzalloc(pdata->nbuttons * (sizeof *buttons), GFP_KERNEL);
if (!buttons)
- return -ENODEV;
+ return -ENOMEM;
pp = NULL;
i = 0;
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index ab0acaf7fe8..756348a7f93 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -754,8 +754,11 @@ fail3:
device_remove_file(&client->dev, &dev_attr_disable_kp);
fail2:
while (--pwm >= 0)
- if (lm->pwm[pwm].enabled)
+ if (lm->pwm[pwm].enabled) {
+ device_remove_file(lm->pwm[pwm].cdev.dev,
+ &dev_attr_time);
led_classdev_unregister(&lm->pwm[pwm].cdev);
+ }
fail1:
input_free_device(idev);
kfree(lm);
@@ -775,8 +778,10 @@ static int __devexit lm8323_remove(struct i2c_client *client)
device_remove_file(&lm->client->dev, &dev_attr_disable_kp);
for (i = 0; i < 3; i++)
- if (lm->pwm[i].enabled)
+ if (lm->pwm[i].enabled) {
+ device_remove_file(lm->pwm[i].cdev.dev, &dev_attr_time);
led_classdev_unregister(&lm->pwm[i].cdev);
+ }
kfree(lm);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index da3828fc2c0..f270447ba95 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -19,6 +19,7 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/platform_device.h>
@@ -37,7 +38,7 @@
#define KBC_ROW_SCAN_DLY 5
/* KBC uses a 32KHz clock so a cycle = 1/32Khz */
-#define KBC_CYCLE_USEC 32
+#define KBC_CYCLE_MS 32
/* KBC Registers */
@@ -647,7 +648,7 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
- kbc->repoll_dly = ((kbc->repoll_dly * KBC_CYCLE_USEC) + 999) / 1000;
+ kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS);
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index c456f63b6ba..783597a9a64 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -21,6 +21,7 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input/kxtj9.h>
#include <linux/input-polldev.h>
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 20f8f9284f0..6c76cf79299 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/input-polldev.h>
+#include <linux/of_device.h>
#define MMA8450_DRV_NAME "mma8450"
@@ -229,10 +230,17 @@ static const struct i2c_device_id mma8450_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mma8450_id);
+static const struct of_device_id mma8450_dt_ids[] = {
+ { .compatible = "fsl,mma8450", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, mma8450_dt_ids);
+
static struct i2c_driver mma8450_driver = {
.driver = {
.name = MMA8450_DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = mma8450_dt_ids,
},
.probe = mma8450_probe,
.remove = __devexit_p(mma8450_remove),
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 95577c15ae5..4d17d9f3320 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -32,6 +32,7 @@
#define DEBUG
#include <linux/slab.h>
#include <linux/input.h>
+#include <linux/module.h>
#include <linux/serio.h>
#include <linux/libps2.h>
#include <linux/delay.h>
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 80baa53da5b..d64c5a43aaa 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -23,7 +23,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
-
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index bc3b5187f3a..131f9d1c921 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -249,12 +249,14 @@ static void __ad7879_enable(struct ad7879 *ts)
static void __ad7879_disable(struct ad7879 *ts)
{
+ u16 reg = (ts->cmd_crtl2 & ~AD7879_PM(-1)) |
+ AD7879_PM(AD7879_PM_SHUTDOWN);
disable_irq(ts->irq);
if (del_timer_sync(&ts->timer))
ad7879_ts_event_release(ts);
- ad7879_write(ts, AD7879_REG_CTRL2, AD7879_PM(AD7879_PM_SHUTDOWN));
+ ad7879_write(ts, AD7879_REG_CTRL2, reg);
}
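The ad7879 fix above keeps the cached CTRL2 contents and only rewrites the power-management field, where the old code wrote AD7879_PM(AD7879_PM_SHUTDOWN) alone and clobbered the other control bits. A hedged illustration of that read-modify-write pattern (ctrl2_with_pm() is an invented helper, not part of the driver):

        /* Hypothetical helper: replace only the PM bits of a cached CTRL2 value. */
        static inline u16 ctrl2_with_pm(u16 cached_ctrl2, u16 pm_mode)
        {
                return (cached_ctrl2 & ~AD7879_PM(-1)) | AD7879_PM(pm_mode);
        }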
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 8420129fc5e..f75a66e7d31 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -241,12 +241,13 @@ config DM_MIRROR
needed for live data migration tools such as 'pvmove'.
config DM_RAID
- tristate "RAID 4/5/6 target (EXPERIMENTAL)"
+ tristate "RAID 1/4/5/6 target (EXPERIMENTAL)"
depends on BLK_DEV_DM && EXPERIMENTAL
+ select MD_RAID1
select MD_RAID456
select BLK_DEV_MD
---help---
- A dm target that supports RAID4, RAID5 and RAID6 mappings
+ A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings
A RAID-5 set of N drives with a capacity of C MB per drive provides
the capacity of C * (N - 1) MB, and protects against a failure
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bae6c4e23d3..49da55c1528 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -30,7 +30,6 @@
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "crypt"
-#define MESG_STR(x) x, sizeof(x)
/*
* context holding the current state of a multi-part conversion
@@ -239,7 +238,7 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
memset(iv, 0, cc->iv_size);
- *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
+ *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
return 0;
}
@@ -248,7 +247,7 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
memset(iv, 0, cc->iv_size);
- *(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
+ *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
return 0;
}
@@ -415,7 +414,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
memset(iv, 0, cc->iv_size);
- *(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
+ *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
return 0;
@@ -1575,11 +1574,17 @@ bad_mem:
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
- unsigned int key_size;
+ unsigned int key_size, opt_params;
unsigned long long tmpll;
int ret;
+ struct dm_arg_set as;
+ const char *opt_string;
+
+ static struct dm_arg _args[] = {
+ {0, 1, "Invalid number of feature args"},
+ };
- if (argc != 5) {
+ if (argc < 5) {
ti->error = "Not enough arguments";
return -EINVAL;
}
@@ -1648,6 +1653,30 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
cc->start = tmpll;
+ argv += 5;
+ argc -= 5;
+
+ /* Optional parameters */
+ if (argc) {
+ as.argc = argc;
+ as.argv = argv;
+
+ ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+ if (ret)
+ goto bad;
+
+ opt_string = dm_shift_arg(&as);
+
+ if (opt_params == 1 && opt_string &&
+ !strcasecmp(opt_string, "allow_discards"))
+ ti->num_discard_requests = 1;
+ else if (opt_params) {
+ ret = -EINVAL;
+ ti->error = "Invalid feature arguments";
+ goto bad;
+ }
+ }
+
ret = -ENOMEM;
cc->io_queue = alloc_workqueue("kcryptd_io",
WQ_NON_REENTRANT|
@@ -1682,9 +1711,16 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
struct dm_crypt_io *io;
struct crypt_config *cc;
- if (bio->bi_rw & REQ_FLUSH) {
+ /*
+ * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
+ * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
+ * - for REQ_DISCARD caller must use flush if IO ordering matters
+ */
+ if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
cc = ti->private;
bio->bi_bdev = cc->dev->bdev;
+ if (bio_sectors(bio))
+ bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
return DM_MAPIO_REMAPPED;
}
@@ -1727,6 +1763,10 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
cc->dev->name, (unsigned long long)cc->start);
+
+ if (ti->num_discard_requests)
+ DMEMIT(" 1 allow_discards");
+
break;
}
return 0;
@@ -1770,12 +1810,12 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
if (argc < 2)
goto error;
- if (!strnicmp(argv[0], MESG_STR("key"))) {
+ if (!strcasecmp(argv[0], "key")) {
if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
DMWARN("not suspended during key manipulation.");
return -EINVAL;
}
- if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
+ if (argc == 3 && !strcasecmp(argv[1], "set")) {
ret = crypt_set_key(cc, argv[2]);
if (ret)
return ret;
@@ -1783,7 +1823,7 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
ret = cc->iv_gen_ops->init(cc);
return ret;
}
- if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
+ if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
ret = cc->iv_gen_ops->wipe(cc);
if (ret)
@@ -1823,7 +1863,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
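With the optional-argument handling added to crypt_ctr() above, a crypt table line may now append a feature-argument count and "allow_discards" after the five positional arguments. A hypothetical example of the form <cipher> <key> <iv_offset> <device> <offset> [<#features> <feature>], with device, key and sector counts as placeholders:

        0 409600 crypt aes-cbc-essiv:sha256 <key-in-hex> 0 /dev/example 0 1 allow_discards

The trailing "1 allow_discards" sets ti->num_discard_requests, and crypt_status() echoes the same two words back in its STATUSTYPE_TABLE output.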
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index ea790623c30..89f73ca22cf 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2003 Sistina Software (UK) Limited.
- * Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -15,6 +15,9 @@
#define DM_MSG_PREFIX "flakey"
+#define all_corrupt_bio_flags_match(bio, fc) \
+ (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
+
/*
* Flakey: Used for testing only, simulates intermittent,
* catastrophic device failure.
@@ -25,60 +28,189 @@ struct flakey_c {
sector_t start;
unsigned up_interval;
unsigned down_interval;
+ unsigned long flags;
+ unsigned corrupt_bio_byte;
+ unsigned corrupt_bio_rw;
+ unsigned corrupt_bio_value;
+ unsigned corrupt_bio_flags;
+};
+
+enum feature_flag_bits {
+ DROP_WRITES
};
+static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ struct dm_target *ti)
+{
+ int r;
+ unsigned argc;
+ const char *arg_name;
+
+ static struct dm_arg _args[] = {
+ {0, 6, "Invalid number of feature args"},
+ {1, UINT_MAX, "Invalid corrupt bio byte"},
+ {0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
+ {0, UINT_MAX, "Invalid corrupt bio flags mask"},
+ };
+
+ /* No feature arguments supplied. */
+ if (!as->argc)
+ return 0;
+
+ r = dm_read_arg_group(_args, as, &argc, &ti->error);
+ if (r)
+ return r;
+
+ while (argc) {
+ arg_name = dm_shift_arg(as);
+ argc--;
+
+ /*
+ * drop_writes
+ */
+ if (!strcasecmp(arg_name, "drop_writes")) {
+ if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
+ ti->error = "Feature drop_writes duplicated";
+ return -EINVAL;
+ }
+
+ continue;
+ }
+
+ /*
+ * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
+ */
+ if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
+ if (!argc)
+ ti->error = "Feature corrupt_bio_byte requires parameters";
+
+ r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
+ if (r)
+ return r;
+ argc--;
+
+ /*
+ * Direction r or w?
+ */
+ arg_name = dm_shift_arg(as);
+ if (!strcasecmp(arg_name, "w"))
+ fc->corrupt_bio_rw = WRITE;
+ else if (!strcasecmp(arg_name, "r"))
+ fc->corrupt_bio_rw = READ;
+ else {
+ ti->error = "Invalid corrupt bio direction (r or w)";
+ return -EINVAL;
+ }
+ argc--;
+
+ /*
+ * Value of byte (0-255) to write in place of correct one.
+ */
+ r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
+ if (r)
+ return r;
+ argc--;
+
+ /*
+ * Only corrupt bios with these flags set.
+ */
+ r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
+ if (r)
+ return r;
+ argc--;
+
+ continue;
+ }
+
+ ti->error = "Unrecognised flakey feature requested";
+ return -EINVAL;
+ }
+
+ if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
+ ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
- * Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval>
+ * Construct a flakey mapping:
+ * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
+ *
+ * Feature args:
+ * [drop_writes]
+ * [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
+ *
+ * Nth_byte starts from 1 for the first byte.
+ * Direction is r for READ or w for WRITE.
+ * bio_flags is ignored if 0.
*/
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
+ static struct dm_arg _args[] = {
+ {0, UINT_MAX, "Invalid up interval"},
+ {0, UINT_MAX, "Invalid down interval"},
+ };
+
+ int r;
struct flakey_c *fc;
- unsigned long long tmp;
+ unsigned long long tmpll;
+ struct dm_arg_set as;
+ const char *devname;
- if (argc != 4) {
- ti->error = "dm-flakey: Invalid argument count";
+ as.argc = argc;
+ as.argv = argv;
+
+ if (argc < 4) {
+ ti->error = "Invalid argument count";
return -EINVAL;
}
- fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+ fc = kzalloc(sizeof(*fc), GFP_KERNEL);
if (!fc) {
- ti->error = "dm-flakey: Cannot allocate linear context";
+ ti->error = "Cannot allocate linear context";
return -ENOMEM;
}
fc->start_time = jiffies;
- if (sscanf(argv[1], "%llu", &tmp) != 1) {
- ti->error = "dm-flakey: Invalid device sector";
+ devname = dm_shift_arg(&as);
+
+ if (sscanf(dm_shift_arg(&as), "%llu", &tmpll) != 1) {
+ ti->error = "Invalid device sector";
goto bad;
}
- fc->start = tmp;
+ fc->start = tmpll;
- if (sscanf(argv[2], "%u", &fc->up_interval) != 1) {
- ti->error = "dm-flakey: Invalid up interval";
+ r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
+ if (r)
goto bad;
- }
- if (sscanf(argv[3], "%u", &fc->down_interval) != 1) {
- ti->error = "dm-flakey: Invalid down interval";
+ r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
+ if (r)
goto bad;
- }
if (!(fc->up_interval + fc->down_interval)) {
- ti->error = "dm-flakey: Total (up + down) interval is zero";
+ ti->error = "Total (up + down) interval is zero";
goto bad;
}
if (fc->up_interval + fc->down_interval < fc->up_interval) {
- ti->error = "dm-flakey: Interval overflow";
+ ti->error = "Interval overflow";
goto bad;
}
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) {
- ti->error = "dm-flakey: Device lookup failed";
+ r = parse_features(&as, fc, ti);
+ if (r)
+ goto bad;
+
+ if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
+ ti->error = "Device lookup failed";
goto bad;
}
ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
ti->private = fc;
return 0;
@@ -99,7 +231,7 @@ static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
struct flakey_c *fc = ti->private;
- return fc->start + (bi_sector - ti->begin);
+ return fc->start + dm_target_offset(ti, bi_sector);
}
static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
@@ -111,6 +243,25 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
}
+static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
+{
+ unsigned bio_bytes = bio_cur_bytes(bio);
+ char *data = bio_data(bio);
+
+ /*
+ * Overwrite the Nth byte of the data returned.
+ */
+ if (data && bio_bytes >= fc->corrupt_bio_byte) {
+ data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
+
+ DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
+ "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
+ bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
+ bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+ }
+}
+
static int flakey_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
@@ -119,18 +270,71 @@ static int flakey_map(struct dm_target *ti, struct bio *bio,
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
- if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval)
+ if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
+ /*
+ * Flag this bio as submitted while down.
+ */
+ map_context->ll = 1;
+
+ /*
+ * Map reads as normal.
+ */
+ if (bio_data_dir(bio) == READ)
+ goto map_bio;
+
+ /*
+ * Drop writes?
+ */
+ if (test_bit(DROP_WRITES, &fc->flags)) {
+ bio_endio(bio, 0);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /*
+ * Corrupt matching writes.
+ */
+ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
+ if (all_corrupt_bio_flags_match(bio, fc))
+ corrupt_bio_data(bio, fc);
+ goto map_bio;
+ }
+
+ /*
+ * By default, error all I/O.
+ */
return -EIO;
+ }
+map_bio:
flakey_map_bio(ti, bio);
return DM_MAPIO_REMAPPED;
}
+static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ int error, union map_info *map_context)
+{
+ struct flakey_c *fc = ti->private;
+ unsigned bio_submitted_while_down = map_context->ll;
+
+ /*
+ * Corrupt successful READs while in down state.
+ * If flags were specified, only corrupt those that match.
+ */
+ if (!error && bio_submitted_while_down &&
+ (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc))
+ corrupt_bio_data(bio, fc);
+
+ return error;
+}
+
static int flakey_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
+ unsigned sz = 0;
struct flakey_c *fc = ti->private;
+ unsigned drop_writes;
switch (type) {
case STATUSTYPE_INFO:
@@ -138,9 +342,22 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
- snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name,
- (unsigned long long)fc->start, fc->up_interval,
- fc->down_interval);
+ DMEMIT("%s %llu %u %u ", fc->dev->name,
+ (unsigned long long)fc->start, fc->up_interval,
+ fc->down_interval);
+
+ drop_writes = test_bit(DROP_WRITES, &fc->flags);
+ DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
+
+ if (drop_writes)
+ DMEMIT("drop_writes ");
+
+ if (fc->corrupt_bio_byte)
+ DMEMIT("corrupt_bio_byte %u %c %u %u ",
+ fc->corrupt_bio_byte,
+ (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
+ fc->corrupt_bio_value, fc->corrupt_bio_flags);
+
break;
}
return 0;
@@ -177,11 +394,12 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
.map = flakey_map,
+ .end_io = flakey_end_io,
.status = flakey_status,
.ioctl = flakey_ioctl,
.merge = flakey_merge,
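Putting the new flakey constructor syntax together, two hypothetical tables (device name and sector counts are placeholders):

        0 409600 flakey /dev/example 0 30 5 1 drop_writes
        0 409600 flakey /dev/example 0 30 5 5 corrupt_bio_byte 32 r 66 0

The first silently completes (drops) writes during each 5-second down period of the 30s-up/5s-down cycle; the second corrupts byte 32 of READ bios that were submitted while down, writing the value 66, with no bi_rw flag filtering (bio_flags 0). The feature-argument counts (1 and 5) match what flakey_status() emits: drop_writes counts as one word, corrupt_bio_byte as five.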
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2067288f61f..ad2eba40e31 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -38,6 +38,8 @@ struct io {
struct dm_io_client *client;
io_notify_fn callback;
void *context;
+ void *vma_invalidate_address;
+ unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
static struct kmem_cache *_dm_io_cache;
@@ -116,6 +118,10 @@ static void dec_count(struct io *io, unsigned int region, int error)
set_bit(region, &io->error_bits);
if (atomic_dec_and_test(&io->count)) {
+ if (io->vma_invalidate_size)
+ invalidate_kernel_vmap_range(io->vma_invalidate_address,
+ io->vma_invalidate_size);
+
if (io->sleeper)
wake_up_process(io->sleeper);
@@ -159,6 +165,9 @@ struct dpages {
unsigned context_u;
void *context_ptr;
+
+ void *vma_invalidate_address;
+ unsigned long vma_invalidate_size;
};
/*
@@ -377,6 +386,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->sleeper = current;
io->client = client;
+ io->vma_invalidate_address = dp->vma_invalidate_address;
+ io->vma_invalidate_size = dp->vma_invalidate_size;
+
dispatch_io(rw, num_regions, where, dp, io, 1);
while (1) {
@@ -415,13 +427,21 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io->callback = fn;
io->context = context;
+ io->vma_invalidate_address = dp->vma_invalidate_address;
+ io->vma_invalidate_size = dp->vma_invalidate_size;
+
dispatch_io(rw, num_regions, where, dp, io, 0);
return 0;
}
-static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+ unsigned long size)
{
/* Set up dpages based on memory type */
+
+ dp->vma_invalidate_address = NULL;
+ dp->vma_invalidate_size = 0;
+
switch (io_req->mem.type) {
case DM_IO_PAGE_LIST:
list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
@@ -432,6 +452,11 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
break;
case DM_IO_VMA:
+ flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
+ if ((io_req->bi_rw & RW_MASK) == READ) {
+ dp->vma_invalidate_address = io_req->mem.ptr.vma;
+ dp->vma_invalidate_size = size;
+ }
vm_dp_init(dp, io_req->mem.ptr.vma);
break;
@@ -460,7 +485,7 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
int r;
struct dpages dp;
- r = dp_init(io_req, &dp);
+ r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
if (r)
return r;
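The dm-io changes above wrap DM_IO_VMA transfers with cache maintenance so vmalloc'd buffers behave on architectures with aliasing data caches: dirty CPU lines are flushed before the device touches the buffer, and stale lines are invalidated before the CPU reads data the device wrote. A hedged caller-side sketch of the same pattern (buffer and length are illustrative, not dm-io code):

        #include <linux/highmem.h>
        #include <linux/vmalloc.h>

        static void vmap_io_example(size_t len)
        {
                void *buf = vmalloc(len);

                if (!buf)
                        return;

                flush_kernel_vmap_range(buf, len);      /* before the device sees buf */
                /* ... submit the I/O that uses buf and wait for completion ... */
                invalidate_kernel_vmap_range(buf, len); /* before the CPU reads device data */

                vfree(buf);
        }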
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4cacdad2270..2e9a3ca37bd 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -128,6 +128,24 @@ static struct hash_cell *__get_uuid_cell(const char *str)
return NULL;
}
+static struct hash_cell *__get_dev_cell(uint64_t dev)
+{
+ struct mapped_device *md;
+ struct hash_cell *hc;
+
+ md = dm_get_md(huge_decode_dev(dev));
+ if (!md)
+ return NULL;
+
+ hc = dm_get_mdptr(md);
+ if (!hc) {
+ dm_put(md);
+ return NULL;
+ }
+
+ return hc;
+}
+
/*-----------------------------------------------------------------
* Inserting, removing and renaming a device.
*---------------------------------------------------------------*/
@@ -718,25 +736,45 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
*/
static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
{
- struct mapped_device *md;
- void *mdptr = NULL;
+ struct hash_cell *hc = NULL;
- if (*param->uuid)
- return __get_uuid_cell(param->uuid);
+ if (*param->uuid) {
+ if (*param->name || param->dev)
+ return NULL;
- if (*param->name)
- return __get_name_cell(param->name);
+ hc = __get_uuid_cell(param->uuid);
+ if (!hc)
+ return NULL;
+ } else if (*param->name) {
+ if (param->dev)
+ return NULL;
- md = dm_get_md(huge_decode_dev(param->dev));
- if (!md)
- goto out;
+ hc = __get_name_cell(param->name);
+ if (!hc)
+ return NULL;
+ } else if (param->dev) {
+ hc = __get_dev_cell(param->dev);
+ if (!hc)
+ return NULL;
+ } else
+ return NULL;
- mdptr = dm_get_mdptr(md);
- if (!mdptr)
- dm_put(md);
+ /*
+ * Sneakily write in both the name and the uuid
+ * while we have the cell.
+ */
+ strlcpy(param->name, hc->name, sizeof(param->name));
+ if (hc->uuid)
+ strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
+ else
+ param->uuid[0] = '\0';
-out:
- return mdptr;
+ if (hc->new_map)
+ param->flags |= DM_INACTIVE_PRESENT_FLAG;
+ else
+ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+
+ return hc;
}
static struct mapped_device *find_device(struct dm_ioctl *param)
@@ -746,24 +784,8 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
down_read(&_hash_lock);
hc = __find_device_hash_cell(param);
- if (hc) {
+ if (hc)
md = hc->md;
-
- /*
- * Sneakily write in both the name and the uuid
- * while we have the cell.
- */
- strlcpy(param->name, hc->name, sizeof(param->name));
- if (hc->uuid)
- strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
- else
- param->uuid[0] = '\0';
-
- if (hc->new_map)
- param->flags |= DM_INACTIVE_PRESENT_FLAG;
- else
- param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
- }
up_read(&_hash_lock);
return md;
@@ -1402,6 +1424,11 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
goto out;
}
+ if (!argc) {
+ DMWARN("Empty message received.");
+ goto out;
+ }
+
table = dm_get_live_table(md);
if (!table)
goto out_argv;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 320401dec10..f8214702963 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -224,8 +224,6 @@ struct kcopyd_job {
unsigned int num_dests;
struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
- sector_t offset;
- unsigned int nr_pages;
struct page_list *pages;
/*
@@ -380,7 +378,7 @@ static int run_io_job(struct kcopyd_job *job)
.bi_rw = job->rw,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
- .mem.offset = job->offset,
+ .mem.offset = 0,
.notify.fn = complete_io,
.notify.context = job,
.client = job->kc->io_client,
@@ -397,10 +395,9 @@ static int run_io_job(struct kcopyd_job *job)
static int run_pages_job(struct kcopyd_job *job)
{
int r;
+ unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
- job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
- PAGE_SIZE >> 9);
- r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
+ r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
if (!r) {
/* this job is ready for io */
push(&job->kc->io_jobs, job);
@@ -602,8 +599,6 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->num_dests = num_dests;
memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
- job->offset = 0;
- job->nr_pages = 0;
job->pages = NULL;
job->fn = fn;
@@ -622,6 +617,37 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
}
EXPORT_SYMBOL(dm_kcopyd_copy);
+void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
+ dm_kcopyd_notify_fn fn, void *context)
+{
+ struct kcopyd_job *job;
+
+ job = mempool_alloc(kc->job_pool, GFP_NOIO);
+
+ memset(job, 0, sizeof(struct kcopyd_job));
+ job->kc = kc;
+ job->fn = fn;
+ job->context = context;
+
+ atomic_inc(&kc->nr_jobs);
+
+ return job;
+}
+EXPORT_SYMBOL(dm_kcopyd_prepare_callback);
+
+void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
+{
+ struct kcopyd_job *job = j;
+ struct dm_kcopyd_client *kc = job->kc;
+
+ job->read_err = read_err;
+ job->write_err = write_err;
+
+ push(&kc->complete_jobs, job);
+ wake(kc);
+}
+EXPORT_SYMBOL(dm_kcopyd_do_callback);
+
/*
* Cancels a kcopyd job, eg. someone might be deactivating a
* mirror.
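The newly exported pair dm_kcopyd_prepare_callback()/dm_kcopyd_do_callback() lets a client push a completion through the kcopyd job queue without performing a copy, so the notify function still runs from kcopyd's worker context. A hedged usage sketch (every name other than the two exported functions and the notify typedef is invented):

        static void my_copy_done(int read_err, unsigned long write_err, void *context)
        {
                /* Runs from the kcopyd worker, exactly like a real copy completion. */
        }

        static void complete_without_copy(struct dm_kcopyd_client *kc, void *context)
        {
                void *job = dm_kcopyd_prepare_callback(kc, my_copy_done, context);

                /* ... the copy turned out to be unnecessary ... */

                dm_kcopyd_do_callback(job, 0 /* read_err */, 0 /* write_err */);
        }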
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index aa2e0c374ab..1021c898601 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -394,8 +394,7 @@ static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
group[count] = fe->region;
count++;
- list_del(&fe->list);
- list_add(&fe->list, &tmp_list);
+ list_move(&fe->list, &tmp_list);
type = fe->type;
if (count >= MAX_FLUSH_GROUP_COUNT)
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 948e3f4925b..3b52bb72bd1 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -197,15 +197,21 @@ EXPORT_SYMBOL(dm_dirty_log_destroy);
#define MIRROR_DISK_VERSION 2
#define LOG_OFFSET 2
-struct log_header {
- uint32_t magic;
+struct log_header_disk {
+ __le32 magic;
/*
* Simple, incrementing version. no backward
* compatibility.
*/
+ __le32 version;
+ __le64 nr_regions;
+} __packed;
+
+struct log_header_core {
+ uint32_t magic;
uint32_t version;
- sector_t nr_regions;
+ uint64_t nr_regions;
};
struct log_c {
@@ -239,10 +245,10 @@ struct log_c {
int log_dev_failed;
int log_dev_flush_failed;
struct dm_dev *log_dev;
- struct log_header header;
+ struct log_header_core header;
struct dm_io_region header_location;
- struct log_header *disk_header;
+ struct log_header_disk *disk_header;
};
/*
@@ -251,34 +257,34 @@ struct log_c {
*/
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
- return test_bit_le(bit, (unsigned long *) bs) ? 1 : 0;
+ return test_bit_le(bit, bs) ? 1 : 0;
}
static inline void log_set_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
- __test_and_set_bit_le(bit, (unsigned long *) bs);
+ __set_bit_le(bit, bs);
l->touched_cleaned = 1;
}
static inline void log_clear_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
- __test_and_clear_bit_le(bit, (unsigned long *) bs);
+ __clear_bit_le(bit, bs);
l->touched_dirtied = 1;
}
/*----------------------------------------------------------------
* Header IO
*--------------------------------------------------------------*/
-static void header_to_disk(struct log_header *core, struct log_header *disk)
+static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
{
disk->magic = cpu_to_le32(core->magic);
disk->version = cpu_to_le32(core->version);
disk->nr_regions = cpu_to_le64(core->nr_regions);
}
-static void header_from_disk(struct log_header *core, struct log_header *disk)
+static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk)
{
core->magic = le32_to_cpu(disk->magic);
core->version = le32_to_cpu(disk->version);
@@ -486,7 +492,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
lc->sync_count = (sync == NOSYNC) ? region_count : 0;
- lc->recovering_bits = vmalloc(bitset_size);
+ lc->recovering_bits = vzalloc(bitset_size);
if (!lc->recovering_bits) {
DMWARN("couldn't allocate sync bitset");
vfree(lc->sync_bits);
@@ -498,7 +504,6 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
kfree(lc);
return -ENOMEM;
}
- memset(lc->recovering_bits, 0, bitset_size);
lc->sync_search = 0;
log->context = lc;
@@ -739,8 +744,7 @@ static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
return 0;
do {
- *region = find_next_zero_bit_le(
- (unsigned long *) lc->sync_bits,
+ *region = find_next_zero_bit_le(lc->sync_bits,
lc->region_count,
lc->sync_search);
lc->sync_search = *region + 1;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c3547016f0f..5e0090ef418 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -22,7 +22,6 @@
#include <linux/atomic.h>
#define DM_MSG_PREFIX "multipath"
-#define MESG_STR(x) x, sizeof(x)
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
@@ -505,80 +504,29 @@ static void trigger_event(struct work_struct *work)
* <#paths> <#per-path selector args>
* [<path> [<arg>]* ]+ ]+
*---------------------------------------------------------------*/
-struct param {
- unsigned min;
- unsigned max;
- char *error;
-};
-
-static int read_param(struct param *param, char *str, unsigned *v, char **error)
-{
- if (!str ||
- (sscanf(str, "%u", v) != 1) ||
- (*v < param->min) ||
- (*v > param->max)) {
- *error = param->error;
- return -EINVAL;
- }
-
- return 0;
-}
-
-struct arg_set {
- unsigned argc;
- char **argv;
-};
-
-static char *shift(struct arg_set *as)
-{
- char *r;
-
- if (as->argc) {
- as->argc--;
- r = *as->argv;
- as->argv++;
- return r;
- }
-
- return NULL;
-}
-
-static void consume(struct arg_set *as, unsigned n)
-{
- BUG_ON (as->argc < n);
- as->argc -= n;
- as->argv += n;
-}
-
-static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
+static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
struct dm_target *ti)
{
int r;
struct path_selector_type *pst;
unsigned ps_argc;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of path selector args"},
};
- pst = dm_get_path_selector(shift(as));
+ pst = dm_get_path_selector(dm_shift_arg(as));
if (!pst) {
ti->error = "unknown path selector type";
return -EINVAL;
}
- r = read_param(_params, shift(as), &ps_argc, &ti->error);
+ r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
if (r) {
dm_put_path_selector(pst);
return -EINVAL;
}
- if (ps_argc > as->argc) {
- dm_put_path_selector(pst);
- ti->error = "not enough arguments for path selector";
- return -EINVAL;
- }
-
r = pst->create(&pg->ps, ps_argc, as->argv);
if (r) {
dm_put_path_selector(pst);
@@ -587,12 +535,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
}
pg->ps.type = pst;
- consume(as, ps_argc);
+ dm_consume_args(as, ps_argc);
return 0;
}
-static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
+static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
struct dm_target *ti)
{
int r;
@@ -609,7 +557,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
if (!p)
return ERR_PTR(-ENOMEM);
- r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
+ r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
&p->path.dev);
if (r) {
ti->error = "error getting device";
@@ -660,16 +608,16 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
return ERR_PTR(r);
}
-static struct priority_group *parse_priority_group(struct arg_set *as,
+static struct priority_group *parse_priority_group(struct dm_arg_set *as,
struct multipath *m)
{
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{1, 1024, "invalid number of paths"},
{0, 1024, "invalid number of selector args"}
};
int r;
- unsigned i, nr_selector_args, nr_params;
+ unsigned i, nr_selector_args, nr_args;
struct priority_group *pg;
struct dm_target *ti = m->ti;
@@ -693,26 +641,26 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
/*
* read the paths
*/
- r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error);
+ r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
if (r)
goto bad;
- r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error);
+ r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
if (r)
goto bad;
- nr_params = 1 + nr_selector_args;
+ nr_args = 1 + nr_selector_args;
for (i = 0; i < pg->nr_pgpaths; i++) {
struct pgpath *pgpath;
- struct arg_set path_args;
+ struct dm_arg_set path_args;
- if (as->argc < nr_params) {
+ if (as->argc < nr_args) {
ti->error = "not enough path parameters";
r = -EINVAL;
goto bad;
}
- path_args.argc = nr_params;
+ path_args.argc = nr_args;
path_args.argv = as->argv;
pgpath = parse_path(&path_args, &pg->ps, ti);
@@ -723,7 +671,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
pgpath->pg = pg;
list_add_tail(&pgpath->list, &pg->pgpaths);
- consume(as, nr_params);
+ dm_consume_args(as, nr_args);
}
return pg;
@@ -733,28 +681,23 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
return ERR_PTR(r);
}
-static int parse_hw_handler(struct arg_set *as, struct multipath *m)
+static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
unsigned hw_argc;
int ret;
struct dm_target *ti = m->ti;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of hardware handler args"},
};
- if (read_param(_params, shift(as), &hw_argc, &ti->error))
+ if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
return -EINVAL;
if (!hw_argc)
return 0;
- if (hw_argc > as->argc) {
- ti->error = "not enough arguments for hardware handler";
- return -EINVAL;
- }
-
- m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
+ m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
request_module("scsi_dh_%s", m->hw_handler_name);
if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
ti->error = "unknown hardware handler type";
@@ -778,7 +721,7 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
j = sprintf(p, "%s", as->argv[i]);
}
- consume(as, hw_argc - 1);
+ dm_consume_args(as, hw_argc - 1);
return 0;
fail:
@@ -787,20 +730,20 @@ fail:
return ret;
}
-static int parse_features(struct arg_set *as, struct multipath *m)
+static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
int r;
unsigned argc;
struct dm_target *ti = m->ti;
- const char *param_name;
+ const char *arg_name;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 5, "invalid number of feature args"},
{1, 50, "pg_init_retries must be between 1 and 50"},
{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
};
- r = read_param(_params, shift(as), &argc, &ti->error);
+ r = dm_read_arg_group(_args, as, &argc, &ti->error);
if (r)
return -EINVAL;
@@ -808,26 +751,24 @@ static int parse_features(struct arg_set *as, struct multipath *m)
return 0;
do {
- param_name = shift(as);
+ arg_name = dm_shift_arg(as);
argc--;
- if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) {
+ if (!strcasecmp(arg_name, "queue_if_no_path")) {
r = queue_if_no_path(m, 1, 0);
continue;
}
- if (!strnicmp(param_name, MESG_STR("pg_init_retries")) &&
+ if (!strcasecmp(arg_name, "pg_init_retries") &&
(argc >= 1)) {
- r = read_param(_params + 1, shift(as),
- &m->pg_init_retries, &ti->error);
+ r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
argc--;
continue;
}
- if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) &&
+ if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
(argc >= 1)) {
- r = read_param(_params + 2, shift(as),
- &m->pg_init_delay_msecs, &ti->error);
+ r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
argc--;
continue;
}
@@ -842,15 +783,15 @@ static int parse_features(struct arg_set *as, struct multipath *m)
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
char **argv)
{
- /* target parameters */
- static struct param _params[] = {
+ /* target arguments */
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of priority groups"},
{0, 1024, "invalid initial priority group number"},
};
int r;
struct multipath *m;
- struct arg_set as;
+ struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
@@ -871,11 +812,11 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
if (r)
goto bad;
- r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error);
+ r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
if (r)
goto bad;
- r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error);
+ r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
if (r)
goto bad;
@@ -1505,10 +1446,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
}
if (argc == 1) {
- if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) {
+ if (!strcasecmp(argv[0], "queue_if_no_path")) {
r = queue_if_no_path(m, 1, 0);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) {
+ } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
r = queue_if_no_path(m, 0, 0);
goto out;
}
@@ -1519,18 +1460,18 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
goto out;
}
- if (!strnicmp(argv[0], MESG_STR("disable_group"))) {
+ if (!strcasecmp(argv[0], "disable_group")) {
r = bypass_pg_num(m, argv[1], 1);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) {
+ } else if (!strcasecmp(argv[0], "enable_group")) {
r = bypass_pg_num(m, argv[1], 0);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) {
+ } else if (!strcasecmp(argv[0], "switch_group")) {
r = switch_pg_num(m, argv[1]);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
+ } else if (!strcasecmp(argv[0], "reinstate_path"))
action = reinstate_path;
- else if (!strnicmp(argv[0], MESG_STR("fail_path")))
+ else if (!strcasecmp(argv[0], "fail_path"))
action = fail_path;
else {
DMWARN("Unrecognised multipath message received.");
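The multipath conversion above (like the dm-crypt and dm-flakey changes earlier) replaces each target's private read_param()/shift()/consume() helpers with the shared dm_arg_set API; dm_read_arg_group() also verifies that enough words remain, which is why the "not enough arguments" checks could be dropped. A hedged sketch of how a constructor typically drives these helpers (the surrounding function and bounds are invented):

        static int example_parse_features(struct dm_target *ti, struct dm_arg_set *as)
        {
                unsigned nr_feature_args;
                const char *arg_name;
                int r;

                static struct dm_arg _args[] = {
                        {0, 4, "invalid number of feature args"},
                };

                /* Reads the count, range-checks it and confirms enough words remain. */
                r = dm_read_arg_group(_args, as, &nr_feature_args, &ti->error);
                if (r)
                        return r;

                while (nr_feature_args--) {
                        arg_name = dm_shift_arg(as);    /* next word, or NULL when exhausted */
                        /* ... strcasecmp(arg_name, ...) and dm_read_arg() its values ... */
                }

                return 0;
        }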
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index e5d8904fc8f..a002dd85db1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -8,19 +8,19 @@
#include <linux/slab.h>
#include "md.h"
+#include "raid1.h"
#include "raid5.h"
-#include "dm.h"
#include "bitmap.h"
+#include <linux/device-mapper.h>
+
#define DM_MSG_PREFIX "raid"
/*
- * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then
- * make it so the flag doesn't set anything.
+ * The following flags are used by dm-raid.c to set up the array state.
+ * They must be cleared before md_run is called.
*/
-#ifndef MD_SYNC_STATE_FORCED
-#define MD_SYNC_STATE_FORCED 0
-#endif
+#define FirstUse 10 /* rdev flag */
struct raid_dev {
/*
@@ -43,14 +43,15 @@ struct raid_dev {
/*
* Flags for rs->print_flags field.
*/
-#define DMPF_DAEMON_SLEEP 0x1
-#define DMPF_MAX_WRITE_BEHIND 0x2
-#define DMPF_SYNC 0x4
-#define DMPF_NOSYNC 0x8
-#define DMPF_STRIPE_CACHE 0x10
-#define DMPF_MIN_RECOVERY_RATE 0x20
-#define DMPF_MAX_RECOVERY_RATE 0x40
-
+#define DMPF_SYNC 0x1
+#define DMPF_NOSYNC 0x2
+#define DMPF_REBUILD 0x4
+#define DMPF_DAEMON_SLEEP 0x8
+#define DMPF_MIN_RECOVERY_RATE 0x10
+#define DMPF_MAX_RECOVERY_RATE 0x20
+#define DMPF_MAX_WRITE_BEHIND 0x40
+#define DMPF_STRIPE_CACHE 0x80
+#define DMPF_REGION_SIZE 0X100
struct raid_set {
struct dm_target *ti;
@@ -72,6 +73,7 @@ static struct raid_type {
const unsigned level; /* RAID level. */
const unsigned algorithm; /* RAID algorithm. */
} raid_types[] = {
+ {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */},
{"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
{"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
{"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
@@ -105,7 +107,8 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
}
sectors_per_dev = ti->len;
- if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
+ if ((raid_type->level > 1) &&
+ sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
ti->error = "Target length not divisible by number of data devices";
return ERR_PTR(-EINVAL);
}
@@ -147,9 +150,16 @@ static void context_free(struct raid_set *rs)
{
int i;
- for (i = 0; i < rs->md.raid_disks; i++)
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ if (rs->dev[i].meta_dev)
+ dm_put_device(rs->ti, rs->dev[i].meta_dev);
+ if (rs->dev[i].rdev.sb_page)
+ put_page(rs->dev[i].rdev.sb_page);
+ rs->dev[i].rdev.sb_page = NULL;
+ rs->dev[i].rdev.sb_loaded = 0;
if (rs->dev[i].data_dev)
dm_put_device(rs->ti, rs->dev[i].data_dev);
+ }
kfree(rs);
}
@@ -159,7 +169,16 @@ static void context_free(struct raid_set *rs)
* <meta_dev>: meta device name or '-' if missing
* <data_dev>: data device name or '-' if missing
*
- * This code parses those words.
+ * The following are permitted:
+ * - -
+ * - <data_dev>
+ * <meta_dev> <data_dev>
+ *
+ * The following is not allowed:
+ * <meta_dev> -
+ *
+ * This code parses those words. If there is a failure,
+ * the caller must use context_free to unwind the operations.
*/
static int dev_parms(struct raid_set *rs, char **argv)
{
@@ -182,8 +201,16 @@ static int dev_parms(struct raid_set *rs, char **argv)
rs->dev[i].rdev.mddev = &rs->md;
if (strcmp(argv[0], "-")) {
- rs->ti->error = "Metadata devices not supported";
- return -EINVAL;
+ ret = dm_get_device(rs->ti, argv[0],
+ dm_table_get_mode(rs->ti->table),
+ &rs->dev[i].meta_dev);
+ rs->ti->error = "RAID metadata device lookup failure";
+ if (ret)
+ return ret;
+
+ rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
+ if (!rs->dev[i].rdev.sb_page)
+ return -ENOMEM;
}
if (!strcmp(argv[1], "-")) {
@@ -193,6 +220,10 @@ static int dev_parms(struct raid_set *rs, char **argv)
return -EINVAL;
}
+ rs->ti->error = "No data device supplied with metadata device";
+ if (rs->dev[i].meta_dev)
+ return -EINVAL;
+
continue;
}
@@ -204,6 +235,10 @@ static int dev_parms(struct raid_set *rs, char **argv)
return ret;
}
+ if (rs->dev[i].meta_dev) {
+ metadata_available = 1;
+ rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
+ }
rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
@@ -235,33 +270,109 @@ static int dev_parms(struct raid_set *rs, char **argv)
}
/*
+ * validate_region_size
+ * @rs
+ * @region_size: region size in sectors. If 0, pick a size (4MiB default).
+ *
+ * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
+ * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
+ *
+ * Returns: 0 on success, -EINVAL on failure.
+ */
+static int validate_region_size(struct raid_set *rs, unsigned long region_size)
+{
+ unsigned long min_region_size = rs->ti->len / (1 << 21);
+
+ if (!region_size) {
+ /*
+ * Choose a reasonable default. All figures in sectors.
+ */
+ if (min_region_size > (1 << 13)) {
+ DMINFO("Choosing default region size of %lu sectors",
+ region_size);
+ region_size = min_region_size;
+ } else {
+ DMINFO("Choosing default region size of 4MiB");
+ region_size = 1 << 13; /* sectors */
+ }
+ } else {
+ /*
+ * Validate user-supplied value.
+ */
+ if (region_size > rs->ti->len) {
+ rs->ti->error = "Supplied region size is too large";
+ return -EINVAL;
+ }
+
+ if (region_size < min_region_size) {
+ DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
+ region_size, min_region_size);
+ rs->ti->error = "Supplied region size is too small";
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(region_size)) {
+ rs->ti->error = "Region size is not a power of 2";
+ return -EINVAL;
+ }
+
+ if (region_size < rs->md.chunk_sectors) {
+ rs->ti->error = "Region size is smaller than the chunk size";
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Convert sectors to bytes.
+ */
+ rs->md.bitmap_info.chunksize = (region_size << 9);
+
+ return 0;
+}
+
+/*
* Possible arguments are...
- * RAID456:
* <chunk_size> [optional_args]
*
- * Optional args:
- * [[no]sync] Force or prevent recovery of the entire array
+ * Argument definitions
+ * <chunk_size> The number of sectors per disk that
+ * will form the "stripe"
+ * [[no]sync] Force or prevent recovery of the
+ * entire array
* [rebuild <idx>] Rebuild the drive indicated by the index
- * [daemon_sleep <ms>] Time between bitmap daemon work to clear bits
+ * [daemon_sleep <ms>] Time between bitmap daemon work to
+ * clear bits
* [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
* [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
+ * [write_mostly <idx>] Indicate a write mostly drive via index
* [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
* [stripe_cache <sectors>] Stripe cache size for higher RAIDs
+ * [region_size <sectors>] Defines granularity of bitmap
*/
static int parse_raid_params(struct raid_set *rs, char **argv,
unsigned num_raid_params)
{
unsigned i, rebuild_cnt = 0;
- unsigned long value;
+ unsigned long value, region_size = 0;
char *key;
/*
* First, parse the in-order required arguments
+ * "chunk_size" is the only argument of this type.
*/
- if ((strict_strtoul(argv[0], 10, &value) < 0) ||
- !is_power_of_2(value) || (value < 8)) {
+ if ((strict_strtoul(argv[0], 10, &value) < 0)) {
rs->ti->error = "Bad chunk size";
return -EINVAL;
+ } else if (rs->raid_type->level == 1) {
+ if (value)
+ DMERR("Ignoring chunk size parameter for RAID 1");
+ value = 0;
+ } else if (!is_power_of_2(value)) {
+ rs->ti->error = "Chunk size must be a power of 2";
+ return -EINVAL;
+ } else if (value < 8) {
+ rs->ti->error = "Chunk size value is too small";
+ return -EINVAL;
}
rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
@@ -269,22 +380,39 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
num_raid_params--;
/*
- * Second, parse the unordered optional arguments
+ * We set each individual device as In_sync with a completed
+ * 'recovery_offset'. If there has been a device failure or
+ * replacement then one of the following cases applies:
+ *
+ * 1) User specifies 'rebuild'.
+ * - Device is reset when param is read.
+ * 2) A new device is supplied.
+ * - No matching superblock found, resets device.
+ * 3) Device failure was transient and returns on reload.
+ * - Failure noticed, resets device for bitmap replay.
+ * 4) Device hadn't completed recovery after previous failure.
+ * - Superblock is read and overrides recovery_offset.
+ *
+ * What is found in the superblocks of the devices is always
+ * authoritative, unless 'rebuild' or '[no]sync' was specified.
*/
- for (i = 0; i < rs->md.raid_disks; i++)
+ for (i = 0; i < rs->md.raid_disks; i++) {
set_bit(In_sync, &rs->dev[i].rdev.flags);
+ rs->dev[i].rdev.recovery_offset = MaxSector;
+ }
+ /*
+ * Second, parse the unordered optional arguments
+ */
for (i = 0; i < num_raid_params; i++) {
- if (!strcmp(argv[i], "nosync")) {
+ if (!strcasecmp(argv[i], "nosync")) {
rs->md.recovery_cp = MaxSector;
rs->print_flags |= DMPF_NOSYNC;
- rs->md.flags |= MD_SYNC_STATE_FORCED;
continue;
}
- if (!strcmp(argv[i], "sync")) {
+ if (!strcasecmp(argv[i], "sync")) {
rs->md.recovery_cp = 0;
rs->print_flags |= DMPF_SYNC;
- rs->md.flags |= MD_SYNC_STATE_FORCED;
continue;
}
@@ -300,9 +428,13 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
- if (!strcmp(key, "rebuild")) {
- if (++rebuild_cnt > rs->raid_type->parity_devs) {
- rs->ti->error = "Too many rebuild drives given";
+ if (!strcasecmp(key, "rebuild")) {
+ rebuild_cnt++;
+ if (((rs->raid_type->level != 1) &&
+ (rebuild_cnt > rs->raid_type->parity_devs)) ||
+ ((rs->raid_type->level == 1) &&
+ (rebuild_cnt > (rs->md.raid_disks - 1)))) {
+ rs->ti->error = "Too many rebuild devices specified for given RAID type";
return -EINVAL;
}
if (value > rs->md.raid_disks) {
@@ -311,7 +443,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
clear_bit(In_sync, &rs->dev[value].rdev.flags);
rs->dev[value].rdev.recovery_offset = 0;
- } else if (!strcmp(key, "max_write_behind")) {
+ rs->print_flags |= DMPF_REBUILD;
+ } else if (!strcasecmp(key, "write_mostly")) {
+ if (rs->raid_type->level != 1) {
+ rs->ti->error = "write_mostly option is only valid for RAID1";
+ return -EINVAL;
+ }
+ if (value > rs->md.raid_disks) {
+ rs->ti->error = "Invalid write_mostly drive index given";
+ return -EINVAL;
+ }
+ set_bit(WriteMostly, &rs->dev[value].rdev.flags);
+ } else if (!strcasecmp(key, "max_write_behind")) {
+ if (rs->raid_type->level != 1) {
+ rs->ti->error = "max_write_behind option is only valid for RAID1";
+ return -EINVAL;
+ }
rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
/*
@@ -324,14 +471,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
rs->md.bitmap_info.max_write_behind = value;
- } else if (!strcmp(key, "daemon_sleep")) {
+ } else if (!strcasecmp(key, "daemon_sleep")) {
rs->print_flags |= DMPF_DAEMON_SLEEP;
if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
rs->ti->error = "daemon sleep period out of range";
return -EINVAL;
}
rs->md.bitmap_info.daemon_sleep = value;
- } else if (!strcmp(key, "stripe_cache")) {
+ } else if (!strcasecmp(key, "stripe_cache")) {
rs->print_flags |= DMPF_STRIPE_CACHE;
/*
@@ -348,20 +495,23 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "Bad stripe_cache size";
return -EINVAL;
}
- } else if (!strcmp(key, "min_recovery_rate")) {
+ } else if (!strcasecmp(key, "min_recovery_rate")) {
rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
if (value > INT_MAX) {
rs->ti->error = "min_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_min = (int)value;
- } else if (!strcmp(key, "max_recovery_rate")) {
+ } else if (!strcasecmp(key, "max_recovery_rate")) {
rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
if (value > INT_MAX) {
rs->ti->error = "max_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_max = (int)value;
+ } else if (!strcasecmp(key, "region_size")) {
+ rs->print_flags |= DMPF_REGION_SIZE;
+ region_size = value;
} else {
DMERR("Unable to parse RAID parameter: %s", key);
rs->ti->error = "Unable to parse RAID parameters";
@@ -369,6 +519,19 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
}
+ if (validate_region_size(rs, region_size))
+ return -EINVAL;
+
+ if (rs->md.chunk_sectors)
+ rs->ti->split_io = rs->md.chunk_sectors;
+ else
+ rs->ti->split_io = region_size;
+
+ if (rs->md.chunk_sectors)
+ rs->ti->split_io = rs->md.chunk_sectors;
+ else
+ rs->ti->split_io = region_size;
+
/* Assume there are no metadata devices until the drives are parsed */
rs->md.persistent = 0;
rs->md.external = 1;
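For reference, a RAID1 mapping using the new parameters might look like the following hypothetical table (sector counts and device names are placeholders; the overall <raid_type> <#raid_params> <raid_params> <#raid_devs> <meta> <data> layout follows the dm-raid documentation rather than code shown in this hunk):

        0 1953125 raid raid1 5 0 region_size 8192 rebuild 1 2 - /dev/example0 - /dev/example1

Here chunk_size is 0 (ignored for RAID1), region_size 8192 sectors makes each bitmap region 8192 << 9 bytes = 4 MiB (the same value validate_region_size() picks by default), device index 1 is marked for rebuild, and the two '-' entries request no metadata devices.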
@@ -387,17 +550,351 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
+ if (rs->raid_type->level == 1)
+ return md_raid1_congested(&rs->md, bits);
+
return md_raid5_congested(&rs->md, bits);
}
/*
+ * This structure is never routinely used by userspace, unlike md superblocks.
+ * Devices with this superblock should only ever be accessed via device-mapper.
+ */
+#define DM_RAID_MAGIC 0x64526D44
+struct dm_raid_superblock {
+ __le32 magic; /* "DmRd" */
+ __le32 features; /* Used to indicate possible future changes */
+
+ __le32 num_devices; /* Number of devices in this array. (Max 64) */
+ __le32 array_position; /* The position of this drive in the array */
+
+ __le64 events; /* Incremented by md when superblock updated */
+ __le64 failed_devices; /* Bit field of devices to indicate failures */
+
+ /*
+ * This offset tracks the progress of the repair or replacement of
+ * an individual drive.
+ */
+ __le64 disk_recovery_offset;
+
+ /*
+ * This offset tracks the progress of the initial array
+ * synchronisation/parity calculation.
+ */
+ __le64 array_resync_offset;
+
+ /*
+ * RAID characteristics
+ */
+ __le32 level;
+ __le32 layout;
+ __le32 stripe_sectors;
+
+ __u8 pad[452]; /* Round struct to 512 bytes. */
+ /* Always set to 0 when writing. */
+} __packed;
+
+static int read_disk_sb(mdk_rdev_t *rdev, int size)
+{
+ BUG_ON(!rdev->sb_page);
+
+ if (rdev->sb_loaded)
+ return 0;
+
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
+ DMERR("Failed to read device superblock");
+ return -EINVAL;
+ }
+
+ rdev->sb_loaded = 1;
+
+ return 0;
+}
+
+static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ mdk_rdev_t *r, *t;
+ uint64_t failed_devices;
+ struct dm_raid_superblock *sb;
+
+ sb = page_address(rdev->sb_page);
+ failed_devices = le64_to_cpu(sb->failed_devices);
+
+ rdev_for_each(r, t, mddev)
+ if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags))
+ failed_devices |= (1ULL << r->raid_disk);
+
+ memset(sb, 0, sizeof(*sb));
+
+ sb->magic = cpu_to_le32(DM_RAID_MAGIC);
+ sb->features = cpu_to_le32(0); /* No features yet */
+
+ sb->num_devices = cpu_to_le32(mddev->raid_disks);
+ sb->array_position = cpu_to_le32(rdev->raid_disk);
+
+ sb->events = cpu_to_le64(mddev->events);
+ sb->failed_devices = cpu_to_le64(failed_devices);
+
+ sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
+ sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
+
+ sb->level = cpu_to_le32(mddev->level);
+ sb->layout = cpu_to_le32(mddev->layout);
+ sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
+}
+
+/*
+ * super_load
+ *
+ * This function creates a superblock if one is not found on the device
+ * and will decide which superblock to use if there's a choice.
+ *
+ * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
+ */
+static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
+{
+ int ret;
+ struct dm_raid_superblock *sb;
+ struct dm_raid_superblock *refsb;
+ uint64_t events_sb, events_refsb;
+
+ rdev->sb_start = 0;
+ rdev->sb_size = sizeof(*sb);
+
+ ret = read_disk_sb(rdev, rdev->sb_size);
+ if (ret)
+ return ret;
+
+ sb = page_address(rdev->sb_page);
+ if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) {
+ super_sync(rdev->mddev, rdev);
+
+ set_bit(FirstUse, &rdev->flags);
+
+ /* Force writing of superblocks to disk */
+ set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+
+ /* Any superblock is better than none, choose that if given */
+ return refdev ? 0 : 1;
+ }
+
+ if (!refdev)
+ return 1;
+
+ events_sb = le64_to_cpu(sb->events);
+
+ refsb = page_address(refdev->sb_page);
+ events_refsb = le64_to_cpu(refsb->events);
+
+ return (events_sb > events_refsb) ? 1 : 0;
+}
+
+static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ int role;
+ struct raid_set *rs = container_of(mddev, struct raid_set, md);
+ uint64_t events_sb;
+ uint64_t failed_devices;
+ struct dm_raid_superblock *sb;
+ uint32_t new_devs = 0;
+ uint32_t rebuilds = 0;
+ mdk_rdev_t *r, *t;
+ struct dm_raid_superblock *sb2;
+
+ sb = page_address(rdev->sb_page);
+ events_sb = le64_to_cpu(sb->events);
+ failed_devices = le64_to_cpu(sb->failed_devices);
+
+ /*
+ * Initialise to 1 if this is a new superblock.
+ */
+ mddev->events = events_sb ? : 1;
+
+ /*
+ * Reshaping is not currently allowed
+ */
+ if ((le32_to_cpu(sb->level) != mddev->level) ||
+ (le32_to_cpu(sb->layout) != mddev->layout) ||
+ (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
+ DMERR("Reshaping arrays not yet supported.");
+ return -EINVAL;
+ }
+
+ /* We can only change the number of devices in RAID1 right now */
+ if ((rs->raid_type->level != 1) &&
+ (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
+ DMERR("Reshaping arrays not yet supported.");
+ return -EINVAL;
+ }
+
+ if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
+ mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
+
+ /*
+ * During load, we set FirstUse if a new superblock was written.
+ * There are two reasons we might not have a superblock:
+ * 1) The array is brand new - in which case, all of the
+ * devices must have their In_sync bit set. Also,
+ * recovery_cp must be 0, unless forced.
+ * 2) This is a new device being added to an old array
+ * and the new device needs to be rebuilt - in which
+ * case the In_sync bit will /not/ be set and
+ * recovery_cp must be MaxSector.
+ */
+ rdev_for_each(r, t, mddev) {
+ if (!test_bit(In_sync, &r->flags)) {
+ if (!test_bit(FirstUse, &r->flags))
+ DMERR("Superblock area of "
+ "rebuild device %d should have been "
+ "cleared.", r->raid_disk);
+ set_bit(FirstUse, &r->flags);
+ rebuilds++;
+ } else if (test_bit(FirstUse, &r->flags))
+ new_devs++;
+ }
+
+ if (!rebuilds) {
+ if (new_devs == mddev->raid_disks) {
+ DMINFO("Superblocks created for new array");
+ set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+ } else if (new_devs) {
+ DMERR("New device injected "
+ "into existing array without 'rebuild' "
+ "parameter specified");
+ return -EINVAL;
+ }
+ } else if (new_devs) {
+ DMERR("'rebuild' devices cannot be "
+ "injected into an array with other first-time devices");
+ return -EINVAL;
+ } else if (mddev->recovery_cp != MaxSector) {
+ DMERR("'rebuild' specified while array is not in-sync");
+ return -EINVAL;
+ }
+
+ /*
+ * Now we set the Faulty bit for those devices that are
+ * recorded in the superblock as failed.
+ */
+ rdev_for_each(r, t, mddev) {
+ if (!r->sb_page)
+ continue;
+ sb2 = page_address(r->sb_page);
+ sb2->failed_devices = 0;
+
+ /*
+ * Check for any device re-ordering.
+ */
+ if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
+ role = le32_to_cpu(sb2->array_position);
+ if (role != r->raid_disk) {
+ if (rs->raid_type->level != 1) {
+ rs->ti->error = "Cannot change device "
+ "positions in RAID array";
+ return -EINVAL;
+ }
+ DMINFO("RAID1 device #%d now at position #%d",
+ role, r->raid_disk);
+ }
+
+ /*
+ * Partial recovery is performed on devices
+ * that failed and have since returned.
+ */
+ if (failed_devices & (1ULL << role))
+ set_bit(Faulty, &r->flags);
+ }
+ }
+
+ return 0;
+}
+
+static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ struct dm_raid_superblock *sb = page_address(rdev->sb_page);
+
+ /*
+ * If mddev->events is not set, we know we have not yet initialized
+ * the array.
+ */
+ if (!mddev->events && super_init_validation(mddev, rdev))
+ return -EINVAL;
+
+ mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
+ rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
+ if (!test_bit(FirstUse, &rdev->flags)) {
+ rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
+ if (rdev->recovery_offset != MaxSector)
+ clear_bit(In_sync, &rdev->flags);
+ }
+
+ /*
+ * If a device comes back, set it as not In_sync and no longer faulty.
+ */
+ if (test_bit(Faulty, &rdev->flags)) {
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
+ rdev->saved_raid_disk = rdev->raid_disk;
+ rdev->recovery_offset = 0;
+ }
+
+ clear_bit(FirstUse, &rdev->flags);
+
+ return 0;
+}
+
+/*
+ * Analyse superblocks and select the freshest.
+ */
+static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
+{
+ int ret;
+ mdk_rdev_t *rdev, *freshest, *tmp;
+ mddev_t *mddev = &rs->md;
+
+ freshest = NULL;
+ rdev_for_each(rdev, tmp, mddev) {
+ if (!rdev->meta_bdev)
+ continue;
+
+ ret = super_load(rdev, freshest);
+
+ switch (ret) {
+ case 1:
+ freshest = rdev;
+ break;
+ case 0:
+ break;
+ default:
+ ti->error = "Failed to load superblock";
+ return ret;
+ }
+ }
+
+ if (!freshest)
+ return 0;
+
+ /*
+ * Validation of the freshest device provides the source of
+ * validation for the remaining devices.
+ */
+ ti->error = "Unable to assemble array: Invalid superblocks";
+ if (super_validate(mddev, freshest))
+ return -EINVAL;
+
+ rdev_for_each(rdev, tmp, mddev)
+ if ((rdev != freshest) && super_validate(mddev, rdev))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
* Construct a RAID4/5/6 mapping:
* Args:
* <raid_type> <#raid_params> <raid_params> \
* <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
*
- * ** metadata devices are not supported yet, use '-' instead **
- *
* <raid_params> varies by <raid_type>. See 'parse_raid_params' for
* details on possible <raid_params>.
*/
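
For reference, an illustrative table line in the format described above, with metadata/data device pairs; the device numbers, size and parameter values are invented for the example and are not taken from this patch:

0 1960893648 raid raid4 4 2048 sync region_size 1024 5 8:17 8:18 8:33 8:34 8:49 8:50 8:65 8:66 8:81 8:82
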
@@ -465,8 +962,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (ret)
goto bad;
+ rs->md.sync_super = super_sync;
+ ret = analyse_superblocks(ti, rs);
+ if (ret)
+ goto bad;
+
INIT_WORK(&rs->md.event_work, do_table_event);
- ti->split_io = rs->md.chunk_sectors;
ti->private = rs;
mutex_lock(&rs->md.reconfig_mutex);
@@ -482,6 +983,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
+ mddev_suspend(&rs->md);
return 0;
bad:
@@ -546,12 +1048,17 @@ static int raid_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
/* The string you would use to construct this array */
- for (i = 0; i < rs->md.raid_disks; i++)
- if (rs->dev[i].data_dev &&
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ if ((rs->print_flags & DMPF_REBUILD) &&
+ rs->dev[i].data_dev &&
!test_bit(In_sync, &rs->dev[i].rdev.flags))
- raid_param_cnt++; /* for rebuilds */
+ raid_param_cnt += 2; /* for rebuilds */
+ if (rs->dev[i].data_dev &&
+ test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ raid_param_cnt += 2;
+ }
- raid_param_cnt += (hweight64(rs->print_flags) * 2);
+ raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2);
if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
raid_param_cnt--;
@@ -565,7 +1072,8 @@ static int raid_status(struct dm_target *ti, status_type_t type,
DMEMIT(" nosync");
for (i = 0; i < rs->md.raid_disks; i++)
- if (rs->dev[i].data_dev &&
+ if ((rs->print_flags & DMPF_REBUILD) &&
+ rs->dev[i].data_dev &&
!test_bit(In_sync, &rs->dev[i].rdev.flags))
DMEMIT(" rebuild %u", i);
@@ -579,6 +1087,11 @@ static int raid_status(struct dm_target *ti, status_type_t type,
if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
+ for (i = 0; i < rs->md.raid_disks; i++)
+ if (rs->dev[i].data_dev &&
+ test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ DMEMIT(" write_mostly %u", i);
+
if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
DMEMIT(" max_write_behind %lu",
rs->md.bitmap_info.max_write_behind);
@@ -591,9 +1104,16 @@ static int raid_status(struct dm_target *ti, status_type_t type,
conf ? conf->max_nr_stripes * 2 : 0);
}
+ if (rs->print_flags & DMPF_REGION_SIZE)
+ DMEMIT(" region_size %lu",
+ rs->md.bitmap_info.chunksize >> 9);
+
DMEMIT(" %d", rs->md.raid_disks);
for (i = 0; i < rs->md.raid_disks; i++) {
- DMEMIT(" -"); /* metadata device */
+ if (rs->dev[i].meta_dev)
+ DMEMIT(" %s", rs->dev[i].meta_dev->name);
+ else
+ DMEMIT(" -");
if (rs->dev[i].data_dev)
DMEMIT(" %s", rs->dev[i].data_dev->name);
@@ -650,12 +1170,13 @@ static void raid_resume(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
+ bitmap_load(&rs->md);
mddev_resume(&rs->md);
}
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 135c2f1fdbf..d1f1d701710 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -58,25 +58,30 @@
#define NUM_SNAPSHOT_HDR_CHUNKS 1
struct disk_header {
- uint32_t magic;
+ __le32 magic;
/*
* Is this snapshot valid. There is no way of recovering
* an invalid snapshot.
*/
- uint32_t valid;
+ __le32 valid;
/*
* Simple, incrementing version. no backward
* compatibility.
*/
- uint32_t version;
+ __le32 version;
/* In sectors */
- uint32_t chunk_size;
-};
+ __le32 chunk_size;
+} __packed;
struct disk_exception {
+ __le64 old_chunk;
+ __le64 new_chunk;
+} __packed;
+
+struct core_exception {
uint64_t old_chunk;
uint64_t new_chunk;
};
@@ -169,10 +174,9 @@ static int alloc_area(struct pstore *ps)
if (!ps->area)
goto err_area;
- ps->zero_area = vmalloc(len);
+ ps->zero_area = vzalloc(len);
if (!ps->zero_area)
goto err_zero_area;
- memset(ps->zero_area, 0, len);
ps->header_area = vmalloc(len);
if (!ps->header_area)
@@ -396,32 +400,32 @@ static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
}
static void read_exception(struct pstore *ps,
- uint32_t index, struct disk_exception *result)
+ uint32_t index, struct core_exception *result)
{
- struct disk_exception *e = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, index);
/* copy it */
- result->old_chunk = le64_to_cpu(e->old_chunk);
- result->new_chunk = le64_to_cpu(e->new_chunk);
+ result->old_chunk = le64_to_cpu(de->old_chunk);
+ result->new_chunk = le64_to_cpu(de->new_chunk);
}
static void write_exception(struct pstore *ps,
- uint32_t index, struct disk_exception *de)
+ uint32_t index, struct core_exception *e)
{
- struct disk_exception *e = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, index);
/* copy it */
- e->old_chunk = cpu_to_le64(de->old_chunk);
- e->new_chunk = cpu_to_le64(de->new_chunk);
+ de->old_chunk = cpu_to_le64(e->old_chunk);
+ de->new_chunk = cpu_to_le64(e->new_chunk);
}
static void clear_exception(struct pstore *ps, uint32_t index)
{
- struct disk_exception *e = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, index);
/* clear it */
- e->old_chunk = 0;
- e->new_chunk = 0;
+ de->old_chunk = 0;
+ de->new_chunk = 0;
}
/*
@@ -437,13 +441,13 @@ static int insert_exceptions(struct pstore *ps,
{
int r;
unsigned int i;
- struct disk_exception de;
+ struct core_exception e;
/* presume the area is full */
*full = 1;
for (i = 0; i < ps->exceptions_per_area; i++) {
- read_exception(ps, i, &de);
+ read_exception(ps, i, &e);
/*
* If the new_chunk is pointing at the start of
@@ -451,7 +455,7 @@ static int insert_exceptions(struct pstore *ps,
* is we know that we've hit the end of the
* exceptions. Therefore the area is not full.
*/
- if (de.new_chunk == 0LL) {
+ if (e.new_chunk == 0LL) {
ps->current_committed = i;
*full = 0;
break;
@@ -460,13 +464,13 @@ static int insert_exceptions(struct pstore *ps,
/*
* Keep track of the start of the free chunks.
*/
- if (ps->next_free <= de.new_chunk)
- ps->next_free = de.new_chunk + 1;
+ if (ps->next_free <= e.new_chunk)
+ ps->next_free = e.new_chunk + 1;
/*
* Otherwise we add the exception to the snapshot.
*/
- r = callback(callback_context, de.old_chunk, de.new_chunk);
+ r = callback(callback_context, e.old_chunk, e.new_chunk);
if (r)
return r;
}
@@ -563,7 +567,7 @@ static int persistent_read_metadata(struct dm_exception_store *store,
ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
sizeof(struct disk_exception);
ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
- sizeof(*ps->callbacks));
+ sizeof(*ps->callbacks));
if (!ps->callbacks)
return -ENOMEM;
@@ -641,12 +645,12 @@ static void persistent_commit_exception(struct dm_exception_store *store,
{
unsigned int i;
struct pstore *ps = get_info(store);
- struct disk_exception de;
+ struct core_exception ce;
struct commit_callback *cb;
- de.old_chunk = e->old_chunk;
- de.new_chunk = e->new_chunk;
- write_exception(ps, ps->current_committed++, &de);
+ ce.old_chunk = e->old_chunk;
+ ce.new_chunk = e->new_chunk;
+ write_exception(ps, ps->current_committed++, &ce);
/*
* Add the callback to the back of the array. This code
@@ -670,7 +674,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
* If we completely filled the current area, then wipe the next one.
*/
if ((ps->current_committed == ps->exceptions_per_area) &&
- zero_disk_area(ps, ps->current_area + 1))
+ zero_disk_area(ps, ps->current_area + 1))
ps->valid = 0;
/*
@@ -701,7 +705,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
chunk_t *last_new_chunk)
{
struct pstore *ps = get_info(store);
- struct disk_exception de;
+ struct core_exception ce;
int nr_consecutive;
int r;
@@ -722,9 +726,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
ps->current_committed = ps->exceptions_per_area;
}
- read_exception(ps, ps->current_committed - 1, &de);
- *last_old_chunk = de.old_chunk;
- *last_new_chunk = de.new_chunk;
+ read_exception(ps, ps->current_committed - 1, &ce);
+ *last_old_chunk = ce.old_chunk;
+ *last_new_chunk = ce.new_chunk;
/*
* Find number of consecutive chunks within the current area,
@@ -733,9 +737,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
nr_consecutive++) {
read_exception(ps, ps->current_committed - 1 - nr_consecutive,
- &de);
- if (de.old_chunk != *last_old_chunk - nr_consecutive ||
- de.new_chunk != *last_new_chunk - nr_consecutive)
+ &ce);
+ if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
+ ce.new_chunk != *last_new_chunk - nr_consecutive)
break;
}
@@ -753,7 +757,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
for (i = 0; i < nr_merged; i++)
clear_exception(ps, ps->current_committed - 1 - i);
- r = area_io(ps, WRITE);
+ r = area_io(ps, WRITE_FLUSH_FUA);
if (r < 0)
return r;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 9ecff5f3023..6f758870fc1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -30,16 +30,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
((ti)->type->name == dm_snapshot_merge_target_name)
/*
- * The percentage increment we will wake up users at
- */
-#define WAKE_UP_PERCENT 5
-
-/*
- * kcopyd priority of snapshot operations
- */
-#define SNAPSHOT_COPY_PRIORITY 2
-
-/*
* The size of the mempool used to track chunks in use.
*/
#define MIN_IOS 256
@@ -180,6 +170,13 @@ struct dm_snap_pending_exception {
* kcopyd.
*/
int started;
+
+ /*
+ * For writing a complete chunk, bypassing the copy.
+ */
+ struct bio *full_bio;
+ bio_end_io_t *full_bio_end_io;
+ void *full_bio_private;
};
/*
@@ -1055,8 +1052,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
- ti->error = "Cannot allocate snapshot context private "
- "structure";
+ ti->error = "Cannot allocate private snapshot structure";
r = -ENOMEM;
goto bad;
}
@@ -1380,6 +1376,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
struct dm_snapshot *s = pe->snap;
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
+ struct bio *full_bio = NULL;
int error = 0;
if (!success) {
@@ -1415,10 +1412,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
*/
dm_insert_exception(&s->complete, e);
- out:
+out:
dm_remove_exception(&pe->e);
snapshot_bios = bio_list_get(&pe->snapshot_bios);
origin_bios = bio_list_get(&pe->origin_bios);
+ full_bio = pe->full_bio;
+ if (full_bio) {
+ full_bio->bi_end_io = pe->full_bio_end_io;
+ full_bio->bi_private = pe->full_bio_private;
+ }
free_pending_exception(pe);
increment_pending_exceptions_done_count();
@@ -1426,10 +1428,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
up_write(&s->lock);
/* Submit any pending write bios */
- if (error)
+ if (error) {
+ if (full_bio)
+ bio_io_error(full_bio);
error_bios(snapshot_bios);
- else
+ } else {
+ if (full_bio)
+ bio_endio(full_bio, 0);
flush_bios(snapshot_bios);
+ }
retry_origin_bios(s, origin_bios);
}
@@ -1480,8 +1487,33 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
- dm_kcopyd_copy(s->kcopyd_client,
- &src, 1, &dest, 0, copy_callback, pe);
+ dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
+}
+
+static void full_bio_end_io(struct bio *bio, int error)
+{
+ void *callback_data = bio->bi_private;
+
+ dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
+}
+
+static void start_full_bio(struct dm_snap_pending_exception *pe,
+ struct bio *bio)
+{
+ struct dm_snapshot *s = pe->snap;
+ void *callback_data;
+
+ pe->full_bio = bio;
+ pe->full_bio_end_io = bio->bi_end_io;
+ pe->full_bio_private = bio->bi_private;
+
+ callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
+ copy_callback, pe);
+
+ bio->bi_end_io = full_bio_end_io;
+ bio->bi_private = callback_data;
+
+ generic_make_request(bio);
}
static struct dm_snap_pending_exception *
@@ -1519,6 +1551,7 @@ __find_pending_exception(struct dm_snapshot *s,
bio_list_init(&pe->origin_bios);
bio_list_init(&pe->snapshot_bios);
pe->started = 0;
+ pe->full_bio = NULL;
if (s->store->type->prepare_exception(s->store, &pe->e)) {
free_pending_exception(pe);
@@ -1612,10 +1645,19 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
}
remap_exception(s, &pe->e, bio, chunk);
- bio_list_add(&pe->snapshot_bios, bio);
r = DM_MAPIO_SUBMITTED;
+ if (!pe->started &&
+ bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+ pe->started = 1;
+ up_write(&s->lock);
+ start_full_bio(pe, bio);
+ goto out;
+ }
+
+ bio_list_add(&pe->snapshot_bios, bio);
+
if (!pe->started) {
/* this is protected by snap->lock */
pe->started = 1;
@@ -1628,9 +1670,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
map_context->ptr = track_chunk(s, chunk);
}
- out_unlock:
+out_unlock:
up_write(&s->lock);
- out:
+out:
return r;
}
@@ -1974,7 +2016,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
pe_to_start_now = pe;
}
- next_snapshot:
+next_snapshot:
up_write(&snap->lock);
if (pe_to_start_now) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bfe9c2333ce..986b8754bb0 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -54,7 +54,6 @@ struct dm_table {
sector_t *highs;
struct dm_target *targets;
- unsigned discards_supported:1;
unsigned integrity_supported:1;
/*
@@ -154,12 +153,11 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
return NULL;
size = nmemb * elem_size;
- addr = vmalloc(size);
- if (addr)
- memset(addr, 0, size);
+ addr = vzalloc(size);
return addr;
}
+EXPORT_SYMBOL(dm_vcalloc);
/*
* highs, and targets are managed as dynamic arrays during a
@@ -209,7 +207,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
INIT_LIST_HEAD(&t->devices);
INIT_LIST_HEAD(&t->target_callbacks);
atomic_set(&t->holders, 0);
- t->discards_supported = 1;
if (!num_targets)
num_targets = KEYS_PER_NODE;
@@ -281,6 +278,7 @@ void dm_table_get(struct dm_table *t)
{
atomic_inc(&t->holders);
}
+EXPORT_SYMBOL(dm_table_get);
void dm_table_put(struct dm_table *t)
{
@@ -290,6 +288,7 @@ void dm_table_put(struct dm_table *t)
smp_mb__before_atomic_dec();
atomic_dec(&t->holders);
}
+EXPORT_SYMBOL(dm_table_put);
/*
* Checks to see if we need to extend highs or targets.
@@ -455,13 +454,14 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
* Add a device to the list, or just increment the usage count if
* it's already present.
*/
-static int __table_get_device(struct dm_table *t, struct dm_target *ti,
- const char *path, fmode_t mode, struct dm_dev **result)
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ struct dm_dev **result)
{
int r;
dev_t uninitialized_var(dev);
struct dm_dev_internal *dd;
unsigned int major, minor;
+ struct dm_table *t = ti->table;
BUG_ON(!t);
@@ -509,6 +509,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
*result = &dd->dm_dev;
return 0;
}
+EXPORT_SYMBOL(dm_get_device);
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
@@ -539,23 +540,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
* If not we'll force DM to use PAGE_SIZE or
* smaller I/O, just to be safe.
*/
-
- if (q->merge_bvec_fn && !ti->type->merge)
+ if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
blk_limits_max_hw_sectors(limits,
(unsigned int) (PAGE_SIZE >> 9));
return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
- struct dm_dev **result)
-{
- return __table_get_device(ti->table, ti, path, mode, result);
-}
-
-
/*
- * Decrement a devices use count and remove it if necessary.
+ * Decrement a device's use count and remove it if necessary.
*/
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
@@ -568,6 +561,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
kfree(dd);
}
}
+EXPORT_SYMBOL(dm_put_device);
/*
* Checks to see if the target joins onto the end of the table.
@@ -791,8 +785,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
- if (!tgt->num_discard_requests)
- t->discards_supported = 0;
+ if (!tgt->num_discard_requests && tgt->discards_supported)
+ DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
+ dm_device_name(t->md), type);
return 0;
@@ -802,6 +797,63 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return r;
}
+/*
+ * Target argument parsing helpers.
+ */
+static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error, unsigned grouped)
+{
+ const char *arg_str = dm_shift_arg(arg_set);
+
+ if (!arg_str ||
+ (sscanf(arg_str, "%u", value) != 1) ||
+ (*value < arg->min) ||
+ (*value > arg->max) ||
+ (grouped && arg_set->argc < *value)) {
+ *error = arg->error;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error)
+{
+ return validate_next_arg(arg, arg_set, value, error, 0);
+}
+EXPORT_SYMBOL(dm_read_arg);
+
+int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error)
+{
+ return validate_next_arg(arg, arg_set, value, error, 1);
+}
+EXPORT_SYMBOL(dm_read_arg_group);
+
+const char *dm_shift_arg(struct dm_arg_set *as)
+{
+ char *r;
+
+ if (as->argc) {
+ as->argc--;
+ r = *as->argv;
+ as->argv++;
+ return r;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(dm_shift_arg);
+
+void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
+{
+ BUG_ON(as->argc < num_args);
+ as->argc -= num_args;
+ as->argv += num_args;
+}
+EXPORT_SYMBOL(dm_consume_args);
+
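
A minimal sketch of a target constructor using these helpers to parse a "<#features> [feature...]" block; the bounds, feature name and function are hypothetical and only illustrate the calling convention:

#include <linux/device-mapper.h>
#include <linux/string.h>

static int example_parse_features(struct dm_target *ti, struct dm_arg_set *as)
{
	static struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature arguments"},
	};
	unsigned num_features;
	const char *arg;
	int r;

	/* Reads "<#features>" and checks that enough arguments follow it. */
	r = dm_read_arg_group(_args, as, &num_features, &ti->error);
	if (r)
		return r;

	while (num_features--) {
		arg = dm_shift_arg(as);

		if (!strcasecmp(arg, "example_feature"))	/* hypothetical flag */
			continue;

		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	return 0;
}
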
static int dm_table_set_type(struct dm_table *t)
{
unsigned i;
@@ -1077,11 +1129,13 @@ void dm_table_event(struct dm_table *t)
t->event_fn(t->event_context);
mutex_unlock(&_event_lock);
}
+EXPORT_SYMBOL(dm_table_event);
sector_t dm_table_get_size(struct dm_table *t)
{
return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
+EXPORT_SYMBOL(dm_table_get_size);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
@@ -1194,9 +1248,45 @@ static void dm_table_set_integrity(struct dm_table *t)
blk_get_integrity(template_disk));
}
+static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ unsigned flush = (*(unsigned *)data);
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && (q->flush_flags & flush);
+}
+
+static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ /*
+ * Require at least one underlying device to support flushes.
+ * t->devices includes internal dm devices such as mirror logs
+ * so we need to use iterate_devices here, which targets
+ * supporting flushes must provide.
+ */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->num_flush_requests)
+ continue;
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, device_flush_capable, &flush))
+ return 1;
+ }
+
+ return 0;
+}
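
This check (and the discard check further down) can only see devices that a target exposes through its iterate_devices method. A minimal sketch of such a method for a hypothetical single-device target, similar in shape to dm-linear's:

#include <linux/device-mapper.h>

/* Hypothetical per-target context holding one underlying device. */
struct example_ctx {
	struct dm_dev *dev;
};

static int example_iterate_devices(struct dm_target *ti,
				   iterate_devices_callout_fn fn, void *data)
{
	struct example_ctx *ec = ti->private;

	/* Replay the callout over the single underlying device. */
	return fn(ti, ec->dev, 0, ti->len, data);
}
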
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
+ unsigned flush = 0;
+
/*
* Copy table's limits to the DM device's request_queue
*/
@@ -1207,6 +1297,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ if (dm_table_supports_flush(t, REQ_FLUSH)) {
+ flush |= REQ_FLUSH;
+ if (dm_table_supports_flush(t, REQ_FUA))
+ flush |= REQ_FUA;
+ }
+ blk_queue_flush(q, flush);
+
dm_table_set_integrity(t);
/*
@@ -1237,6 +1334,7 @@ fmode_t dm_table_get_mode(struct dm_table *t)
{
return t->mode;
}
+EXPORT_SYMBOL(dm_table_get_mode);
static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
@@ -1345,6 +1443,7 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
{
return t->md;
}
+EXPORT_SYMBOL(dm_table_get_md);
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
@@ -1359,19 +1458,19 @@ bool dm_table_supports_discards(struct dm_table *t)
struct dm_target *ti;
unsigned i = 0;
- if (!t->discards_supported)
- return 0;
-
/*
* Unless any target used by the table set discards_supported,
* require at least one underlying device to support discards.
* t->devices includes internal dm devices such as mirror logs
* so we need to use iterate_devices here, which targets
- * supporting discard must provide.
+ * supporting discard selectively must provide.
*/
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
+ if (!ti->num_discard_requests)
+ continue;
+
if (ti->discards_supported)
return 1;
@@ -1382,13 +1481,3 @@ bool dm_table_supports_discards(struct dm_table *t)
return 0;
}
-
-EXPORT_SYMBOL(dm_vcalloc);
-EXPORT_SYMBOL(dm_get_device);
-EXPORT_SYMBOL(dm_put_device);
-EXPORT_SYMBOL(dm_table_event);
-EXPORT_SYMBOL(dm_table_get_size);
-EXPORT_SYMBOL(dm_table_get_mode);
-EXPORT_SYMBOL(dm_table_get_md);
-EXPORT_SYMBOL(dm_table_put);
-EXPORT_SYMBOL(dm_table_get);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0cf68b47887..52b39f335bb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -37,6 +37,8 @@ static const char *_name = DM_NAME;
static unsigned int major = 0;
static unsigned int _major = 0;
+static DEFINE_IDR(_minor_idr);
+
static DEFINE_SPINLOCK(_minor_lock);
/*
* For bio-based dm.
@@ -109,6 +111,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
+#define DMF_MERGE_IS_OPTIONAL 6
/*
* Work processed by per-device workqueue.
@@ -313,6 +316,12 @@ static void __exit dm_exit(void)
while (i--)
_exits[i]();
+
+ /*
+ * Should be empty by this point.
+ */
+ idr_remove_all(&_minor_idr);
+ idr_destroy(&_minor_idr);
}
/*
@@ -1171,7 +1180,8 @@ static int __clone_and_map_discard(struct clone_info *ci)
/*
* Even though the device advertised discard support,
- * reconfiguration might have changed that since the
+ * that does not mean every target supports it, and
+ * reconfiguration might also have changed that since the
* check was performed.
*/
if (!ti->num_discard_requests)
@@ -1705,8 +1715,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
-static DEFINE_IDR(_minor_idr);
-
static void free_minor(int minor)
{
spin_lock(&_minor_lock);
@@ -1800,7 +1808,6 @@ static void dm_init_md_queue(struct mapped_device *md)
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
- blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
}
/*
@@ -1986,6 +1993,59 @@ static void __set_size(struct mapped_device *md, sector_t size)
}
/*
+ * Return 1 if the queue has a compulsory merge_bvec_fn.
+ *
+ * If this function returns 0, then the device is either a non-dm
+ * device without a merge_bvec_fn, or it is a dm device that is
+ * able to split any bios it receives that are too big.
+ */
+int dm_queue_merge_is_compulsory(struct request_queue *q)
+{
+ struct mapped_device *dev_md;
+
+ if (!q->merge_bvec_fn)
+ return 0;
+
+ if (q->make_request_fn == dm_request) {
+ dev_md = q->queuedata;
+ if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
+ return 0;
+ }
+
+ return 1;
+}
+
+static int dm_device_merge_is_compulsory(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct block_device *bdev = dev->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ return dm_queue_merge_is_compulsory(q);
+}
+
+/*
+ * Return 1 if it is acceptable to ignore merge_bvec_fn based
+ * on the properties of the underlying devices.
+ */
+static int dm_table_merge_is_optional(struct dm_table *table)
+{
+ unsigned i = 0;
+ struct dm_target *ti;
+
+ while (i < dm_table_get_num_targets(table)) {
+ ti = dm_table_get_target(table, i++);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
* Returns old map, which caller must destroy.
*/
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -1995,6 +2055,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct request_queue *q = md->queue;
sector_t size;
unsigned long flags;
+ int merge_is_optional;
size = dm_table_get_size(t);
@@ -2020,10 +2081,16 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
__bind_mempools(md, t);
+ merge_is_optional = dm_table_merge_is_optional(t);
+
write_lock_irqsave(&md->map_lock, flags);
old_map = md->map;
md->map = t;
dm_table_set_restrictions(t, q, limits);
+ if (merge_is_optional)
+ set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
+ else
+ clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
write_unlock_irqrestore(&md->map_lock, flags);
return old_map;
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1aaf16746da..6745dbd278a 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -66,6 +66,8 @@ int dm_table_alloc_md_mempools(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
+int dm_queue_merge_is_compulsory(struct request_queue *q);
+
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 37b83eb6d70..21574bdf485 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -171,6 +171,37 @@ config MFD_TPS6586X
This driver can also be built as a module. If so, the module
will be called tps6586x.
+config MFD_TPS65910
+ bool "TPS65910 Power Management chip"
+ depends on I2C=y && GPIOLIB
+ select MFD_CORE
+ select GPIO_TPS65910
+ help
+ If you say yes here you get support for the TPS65910 series of
+ Power Management chips.
+
+config MFD_TPS65912
+ bool
+ depends on GPIOLIB
+
+config MFD_TPS65912_I2C
+ bool "TPS65912 Power Management chip with I2C"
+ select MFD_CORE
+ select MFD_TPS65912
+ depends on I2C=y && GPIOLIB
+ help
+ If you say yes here you get support for the TPS65912 series of
+ PM chips with I2C interface.
+
+config MFD_TPS65912_SPI
+ bool "TPS65912 Power Management chip with SPI"
+ select MFD_CORE
+ select MFD_TPS65912
+ depends on SPI_MASTER && GPIOLIB
+ help
+ If you say yes here you get support for the TPS65912 series of
+ PM chips with SPI interface.
+
config MENELAUS
bool "Texas Instruments TWL92330/Menelaus PM chip"
depends on I2C=y && ARCH_OMAP2
@@ -662,8 +693,9 @@ config MFD_JANZ_CMODIO
CAN and GPIO controllers.
config MFD_JZ4740_ADC
- tristate "Support for the JZ4740 SoC ADC core"
+ bool "Support for the JZ4740 SoC ADC core"
select MFD_CORE
+ select GENERIC_IRQ_CHIP
depends on MACH_JZ4740
help
Say yes here if you want support for the ADC unit in the JZ4740 SoC.
@@ -725,18 +757,19 @@ config MFD_PM8XXX_IRQ
This is required to use certain other PM 8xxx features, such as GPIO
and MPP.
-config MFD_TPS65910
- bool "TPS65910 Power Management chip"
- depends on I2C=y && GPIOLIB
- select MFD_CORE
- select GPIO_TPS65910
- help
- if you say yes here you get support for the TPS65910 series of
- Power Management chips.
-
config TPS65911_COMPARATOR
tristate
+config MFD_AAT2870_CORE
+ bool "Support for the AnalogicTech AAT2870"
+ select MFD_CORE
+ depends on I2C=y && GPIOLIB
+ help
+ If you say yes here you get support for the AAT2870.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 22a280fcb70..c58020303d1 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
+wm831x-objs += wm831x-auxadc.o
obj-$(CONFIG_MFD_WM831X) += wm831x.o
obj-$(CONFIG_MFD_WM831X_I2C) += wm831x-i2c.o
obj-$(CONFIG_MFD_WM831X_SPI) += wm831x-spi.o
@@ -35,6 +36,11 @@ obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
+obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
+tps65912-objs := tps65912-core.o tps65912-irq.o
+obj-$(CONFIG_MFD_TPS65912) += tps65912.o
+obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
+obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
@@ -94,5 +100,5 @@ obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
-obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
+obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
new file mode 100644
index 00000000000..345dc658ef0
--- /dev/null
+++ b/drivers/mfd/aat2870-core.c
@@ -0,0 +1,535 @@
+/*
+ * linux/drivers/mfd/aat2870-core.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/aat2870.h>
+#include <linux/regulator/machine.h>
+
+static struct aat2870_register aat2870_regs[AAT2870_REG_NUM] = {
+ /* readable, writeable, value */
+ { 0, 1, 0x00 }, /* 0x00 AAT2870_BL_CH_EN */
+ { 0, 1, 0x16 }, /* 0x01 AAT2870_BLM */
+ { 0, 1, 0x16 }, /* 0x02 AAT2870_BLS */
+ { 0, 1, 0x56 }, /* 0x03 AAT2870_BL1 */
+ { 0, 1, 0x56 }, /* 0x04 AAT2870_BL2 */
+ { 0, 1, 0x56 }, /* 0x05 AAT2870_BL3 */
+ { 0, 1, 0x56 }, /* 0x06 AAT2870_BL4 */
+ { 0, 1, 0x56 }, /* 0x07 AAT2870_BL5 */
+ { 0, 1, 0x56 }, /* 0x08 AAT2870_BL6 */
+ { 0, 1, 0x56 }, /* 0x09 AAT2870_BL7 */
+ { 0, 1, 0x56 }, /* 0x0A AAT2870_BL8 */
+ { 0, 1, 0x00 }, /* 0x0B AAT2870_FLR */
+ { 0, 1, 0x03 }, /* 0x0C AAT2870_FM */
+ { 0, 1, 0x03 }, /* 0x0D AAT2870_FS */
+ { 0, 1, 0x10 }, /* 0x0E AAT2870_ALS_CFG0 */
+ { 0, 1, 0x06 }, /* 0x0F AAT2870_ALS_CFG1 */
+ { 0, 1, 0x00 }, /* 0x10 AAT2870_ALS_CFG2 */
+ { 1, 0, 0x00 }, /* 0x11 AAT2870_AMB */
+ { 0, 1, 0x00 }, /* 0x12 AAT2870_ALS0 */
+ { 0, 1, 0x00 }, /* 0x13 AAT2870_ALS1 */
+ { 0, 1, 0x00 }, /* 0x14 AAT2870_ALS2 */
+ { 0, 1, 0x00 }, /* 0x15 AAT2870_ALS3 */
+ { 0, 1, 0x00 }, /* 0x16 AAT2870_ALS4 */
+ { 0, 1, 0x00 }, /* 0x17 AAT2870_ALS5 */
+ { 0, 1, 0x00 }, /* 0x18 AAT2870_ALS6 */
+ { 0, 1, 0x00 }, /* 0x19 AAT2870_ALS7 */
+ { 0, 1, 0x00 }, /* 0x1A AAT2870_ALS8 */
+ { 0, 1, 0x00 }, /* 0x1B AAT2870_ALS9 */
+ { 0, 1, 0x00 }, /* 0x1C AAT2870_ALSA */
+ { 0, 1, 0x00 }, /* 0x1D AAT2870_ALSB */
+ { 0, 1, 0x00 }, /* 0x1E AAT2870_ALSC */
+ { 0, 1, 0x00 }, /* 0x1F AAT2870_ALSD */
+ { 0, 1, 0x00 }, /* 0x20 AAT2870_ALSE */
+ { 0, 1, 0x00 }, /* 0x21 AAT2870_ALSF */
+ { 0, 1, 0x00 }, /* 0x22 AAT2870_SUB_SET */
+ { 0, 1, 0x00 }, /* 0x23 AAT2870_SUB_CTRL */
+ { 0, 1, 0x00 }, /* 0x24 AAT2870_LDO_AB */
+ { 0, 1, 0x00 }, /* 0x25 AAT2870_LDO_CD */
+ { 0, 1, 0x00 }, /* 0x26 AAT2870_LDO_EN */
+};
+
+static struct mfd_cell aat2870_devs[] = {
+ {
+ .name = "aat2870-backlight",
+ .id = AAT2870_ID_BL,
+ .pdata_size = sizeof(struct aat2870_bl_platform_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOA,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOB,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOC,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOD,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+};
+
+static int __aat2870_read(struct aat2870_data *aat2870, u8 addr, u8 *val)
+{
+ int ret;
+
+ if (addr >= AAT2870_REG_NUM) {
+ dev_err(aat2870->dev, "Invalid address, 0x%02x\n", addr);
+ return -EINVAL;
+ }
+
+ if (!aat2870->reg_cache[addr].readable) {
+ *val = aat2870->reg_cache[addr].value;
+ goto out;
+ }
+
+ ret = i2c_master_send(aat2870->client, &addr, 1);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+
+ ret = i2c_master_recv(aat2870->client, val, 1);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+
+out:
+ dev_dbg(aat2870->dev, "read: addr=0x%02x, val=0x%02x\n", addr, *val);
+ return 0;
+}
+
+static int __aat2870_write(struct aat2870_data *aat2870, u8 addr, u8 val)
+{
+ u8 msg[2];
+ int ret;
+
+ if (addr >= AAT2870_REG_NUM) {
+ dev_err(aat2870->dev, "Invalid address, 0x%02x\n", addr);
+ return -EINVAL;
+ }
+
+ if (!aat2870->reg_cache[addr].writeable) {
+ dev_err(aat2870->dev, "Address 0x%02x is not writeable\n",
+ addr);
+ return -EINVAL;
+ }
+
+ msg[0] = addr;
+ msg[1] = val;
+ ret = i2c_master_send(aat2870->client, msg, 2);
+ if (ret < 0)
+ return ret;
+ if (ret != 2)
+ return -EIO;
+
+ aat2870->reg_cache[addr].value = val;
+
+ dev_dbg(aat2870->dev, "write: addr=0x%02x, val=0x%02x\n", addr, val);
+ return 0;
+}
+
+static int aat2870_read(struct aat2870_data *aat2870, u8 addr, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&aat2870->io_lock);
+ ret = __aat2870_read(aat2870, addr, val);
+ mutex_unlock(&aat2870->io_lock);
+
+ return ret;
+}
+
+static int aat2870_write(struct aat2870_data *aat2870, u8 addr, u8 val)
+{
+ int ret;
+
+ mutex_lock(&aat2870->io_lock);
+ ret = __aat2870_write(aat2870, addr, val);
+ mutex_unlock(&aat2870->io_lock);
+
+ return ret;
+}
+
+static int aat2870_update(struct aat2870_data *aat2870, u8 addr, u8 mask,
+ u8 val)
+{
+ int change;
+ u8 old_val, new_val;
+ int ret;
+
+ mutex_lock(&aat2870->io_lock);
+
+ ret = __aat2870_read(aat2870, addr, &old_val);
+ if (ret)
+ goto out_unlock;
+
+ new_val = (old_val & ~mask) | (val & mask);
+ change = old_val != new_val;
+ if (change)
+ ret = __aat2870_write(aat2870, addr, new_val);
+
+out_unlock:
+ mutex_unlock(&aat2870->io_lock);
+
+ return ret;
+}
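
The read/write/update hooks wired up in aat2870_i2c_probe() below are what the backlight and regulator cells call into. A hedged sketch of a caller; the register bits are illustrative only and would need checking against the datasheet:

#include <linux/mfd/aat2870.h>

/* Hypothetical example: set the low two bits of AAT2870_BL_CH_EN. */
static int example_enable_two_channels(struct aat2870_data *aat2870)
{
	return aat2870->update(aat2870, AAT2870_BL_CH_EN, 0x03, 0x03);
}
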
+
+static inline void aat2870_enable(struct aat2870_data *aat2870)
+{
+ if (aat2870->en_pin >= 0)
+ gpio_set_value(aat2870->en_pin, 1);
+
+ aat2870->is_enable = 1;
+}
+
+static inline void aat2870_disable(struct aat2870_data *aat2870)
+{
+ if (aat2870->en_pin >= 0)
+ gpio_set_value(aat2870->en_pin, 0);
+
+ aat2870->is_enable = 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t aat2870_dump_reg(struct aat2870_data *aat2870, char *buf)
+{
+ u8 addr, val;
+ ssize_t count = 0;
+ int ret;
+
+ count += sprintf(buf, "aat2870 registers\n");
+ for (addr = 0; addr < AAT2870_REG_NUM; addr++) {
+ count += sprintf(buf + count, "0x%02x: ", addr);
+ if (count >= PAGE_SIZE - 1)
+ break;
+
+ ret = aat2870->read(aat2870, addr, &val);
+ if (ret == 0)
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "0x%02x", val);
+ else
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "<read fail: %d>", ret);
+
+ if (count >= PAGE_SIZE - 1)
+ break;
+
+ count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+ if (count >= PAGE_SIZE - 1)
+ break;
+ }
+
+ /* Truncate count; min() would cause a warning */
+ if (count >= PAGE_SIZE)
+ count = PAGE_SIZE - 1;
+
+ return count;
+}
+
+static int aat2870_reg_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t aat2870_reg_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct aat2870_data *aat2870 = file->private_data;
+ char *buf;
+ ssize_t ret;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = aat2870_dump_reg(aat2870, buf);
+ if (ret >= 0)
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t aat2870_reg_write_file(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct aat2870_data *aat2870 = file->private_data;
+ char buf[32];
+ int buf_size;
+ char *start = buf;
+ unsigned long addr, val;
+ int ret;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size)) {
+ dev_err(aat2870->dev, "Failed to copy from user\n");
+ return -EFAULT;
+ }
+ buf[buf_size] = 0;
+
+ while (*start == ' ')
+ start++;
+
+ addr = simple_strtoul(start, &start, 16);
+ if (addr >= AAT2870_REG_NUM) {
+ dev_err(aat2870->dev, "Invalid address, 0x%lx\n", addr);
+ return -EINVAL;
+ }
+
+ while (*start == ' ')
+ start++;
+
+ if (strict_strtoul(start, 16, &val))
+ return -EINVAL;
+
+ ret = aat2870->write(aat2870, (u8)addr, (u8)val);
+ if (ret)
+ return ret;
+
+ return buf_size;
+}
+
+static const struct file_operations aat2870_reg_fops = {
+ .open = aat2870_reg_open_file,
+ .read = aat2870_reg_read_file,
+ .write = aat2870_reg_write_file,
+};
+
+static void aat2870_init_debugfs(struct aat2870_data *aat2870)
+{
+ aat2870->dentry_root = debugfs_create_dir("aat2870", NULL);
+ if (!aat2870->dentry_root) {
+ dev_warn(aat2870->dev,
+ "Failed to create debugfs root directory\n");
+ return;
+ }
+
+ aat2870->dentry_reg = debugfs_create_file("regs", 0644,
+ aat2870->dentry_root,
+ aat2870, &aat2870_reg_fops);
+ if (!aat2870->dentry_reg)
+ dev_warn(aat2870->dev,
+ "Failed to create debugfs register file\n");
+}
+
+static void aat2870_uninit_debugfs(struct aat2870_data *aat2870)
+{
+ debugfs_remove_recursive(aat2870->dentry_root);
+}
+#else
+static inline void aat2870_init_debugfs(struct aat2870_data *aat2870)
+{
+}
+
+static inline void aat2870_uninit_debugfs(struct aat2870_data *aat2870)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
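
The regs file accepts "<addr> <val>" in hex, as parsed in aat2870_reg_write_file() above. A small userspace sketch, assuming debugfs is mounted at /sys/kernel/debug; whether register 0x00 and value 0xff are sensible depends entirely on the board:

/* Userspace illustration only; build with a normal C toolchain. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/aat2870/regs", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* "<addr> <val>", both hexadecimal. */
	if (write(fd, "0 ff", 4) != 4)
		perror("write");

	close(fd);
	return 0;
}
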
+
+static int aat2870_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct aat2870_platform_data *pdata = client->dev.platform_data;
+ struct aat2870_data *aat2870;
+ int i, j;
+ int ret = 0;
+
+ aat2870 = kzalloc(sizeof(struct aat2870_data), GFP_KERNEL);
+ if (!aat2870) {
+ dev_err(&client->dev,
+ "Failed to allocate memory for aat2870\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ aat2870->dev = &client->dev;
+ dev_set_drvdata(aat2870->dev, aat2870);
+
+ aat2870->client = client;
+ i2c_set_clientdata(client, aat2870);
+
+ aat2870->reg_cache = aat2870_regs;
+
+ if (pdata->en_pin < 0)
+ aat2870->en_pin = -1;
+ else
+ aat2870->en_pin = pdata->en_pin;
+
+ aat2870->init = pdata->init;
+ aat2870->uninit = pdata->uninit;
+ aat2870->read = aat2870_read;
+ aat2870->write = aat2870_write;
+ aat2870->update = aat2870_update;
+
+ mutex_init(&aat2870->io_lock);
+
+ if (aat2870->init)
+ aat2870->init(aat2870);
+
+ if (aat2870->en_pin >= 0) {
+ ret = gpio_request(aat2870->en_pin, "aat2870-en");
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to request GPIO %d\n", aat2870->en_pin);
+ goto out_kfree;
+ }
+ gpio_direction_output(aat2870->en_pin, 1);
+ }
+
+ aat2870_enable(aat2870);
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+ for (j = 0; j < ARRAY_SIZE(aat2870_devs); j++) {
+ if ((pdata->subdevs[i].id == aat2870_devs[j].id) &&
+ !strcmp(pdata->subdevs[i].name,
+ aat2870_devs[j].name)) {
+ aat2870_devs[j].platform_data =
+ pdata->subdevs[i].platform_data;
+ break;
+ }
+ }
+ }
+
+ ret = mfd_add_devices(aat2870->dev, 0, aat2870_devs,
+ ARRAY_SIZE(aat2870_devs), NULL, 0);
+ if (ret != 0) {
+ dev_err(aat2870->dev, "Failed to add subdev: %d\n", ret);
+ goto out_disable;
+ }
+
+ aat2870_init_debugfs(aat2870);
+
+ return 0;
+
+out_disable:
+ aat2870_disable(aat2870);
+ if (aat2870->en_pin >= 0)
+ gpio_free(aat2870->en_pin);
+out_kfree:
+ kfree(aat2870);
+out:
+ return ret;
+}
+
+static int aat2870_i2c_remove(struct i2c_client *client)
+{
+ struct aat2870_data *aat2870 = i2c_get_clientdata(client);
+
+ aat2870_uninit_debugfs(aat2870);
+
+ mfd_remove_devices(aat2870->dev);
+ aat2870_disable(aat2870);
+ if (aat2870->en_pin >= 0)
+ gpio_free(aat2870->en_pin);
+ if (aat2870->uninit)
+ aat2870->uninit(aat2870);
+ kfree(aat2870);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int aat2870_i2c_suspend(struct i2c_client *client, pm_message_t state)
+{
+ struct aat2870_data *aat2870 = i2c_get_clientdata(client);
+
+ aat2870_disable(aat2870);
+
+ return 0;
+}
+
+static int aat2870_i2c_resume(struct i2c_client *client)
+{
+ struct aat2870_data *aat2870 = i2c_get_clientdata(client);
+ struct aat2870_register *reg = NULL;
+ int i;
+
+ aat2870_enable(aat2870);
+
+ /* restore registers */
+ for (i = 0; i < AAT2870_REG_NUM; i++) {
+ reg = &aat2870->reg_cache[i];
+ if (reg->writeable)
+ aat2870->write(aat2870, i, reg->value);
+ }
+
+ return 0;
+}
+#else
+#define aat2870_i2c_suspend NULL
+#define aat2870_i2c_resume NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_device_id aat2870_i2c_id_table[] = {
+ { "aat2870", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, aat2870_i2c_id_table);
+
+static struct i2c_driver aat2870_i2c_driver = {
+ .driver = {
+ .name = "aat2870",
+ .owner = THIS_MODULE,
+ },
+ .probe = aat2870_i2c_probe,
+ .remove = aat2870_i2c_remove,
+ .suspend = aat2870_i2c_suspend,
+ .resume = aat2870_i2c_resume,
+ .id_table = aat2870_i2c_id_table,
+};
+
+static int __init aat2870_init(void)
+{
+ return i2c_add_driver(&aat2870_i2c_driver);
+}
+subsys_initcall(aat2870_init);
+
+static void __exit aat2870_exit(void)
+{
+ i2c_del_driver(&aat2870_i2c_driver);
+}
+module_exit(aat2870_exit);
+
+MODULE_DESCRIPTION("Core support for the AnalogicTech AAT2870");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
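
Since the driver binds by I2C device name, a board file would typically declare platform data and register the device. A hedged sketch in which the GPIO number, bus number and I2C address are all hypothetical:

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/mfd/aat2870.h>

static struct aat2870_platform_data board_aat2870_pdata = {
	.en_pin = 100,	/* hypothetical EN GPIO; negative means no EN GPIO to drive */
	/* .subdevs / .num_subdevs would normally carry per-cell platform data */
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("aat2870", 0x60),	/* hypothetical address */
		.platform_data = &board_aat2870_pdata,
	},
};

/* From the board's init code, e.g.:
 *	i2c_register_board_info(0, board_i2c_devs, ARRAY_SIZE(board_i2c_devs));
 */
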
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 3d7dce671b9..56ba1943c91 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -879,20 +879,13 @@ static ssize_t ab3550_bank_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_bank;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_bank);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_bank);
if (err)
- return -EINVAL;
+ return err;
if (user_bank >= AB3550_NUM_BANKS) {
dev_err(&ab->i2c_client[0]->dev,
@@ -902,7 +895,7 @@ static ssize_t ab3550_bank_write(struct file *file,
ab->debug_bank = user_bank;
- return buf_size;
+ return count;
}
static int ab3550_address_print(struct seq_file *s, void *p)
@@ -923,27 +916,21 @@ static ssize_t ab3550_address_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_address;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_address);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_address);
if (err)
- return -EINVAL;
+ return err;
+
if (user_address > 0xff) {
dev_err(&ab->i2c_client[0]->dev,
"debugfs error input > 0xff\n");
return -EINVAL;
}
ab->debug_address = user_address;
- return buf_size;
+ return count;
}
static int ab3550_val_print(struct seq_file *s, void *p)
@@ -971,21 +958,15 @@ static ssize_t ab3550_val_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_val;
int err;
u8 regvalue;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_val);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_val);
if (err)
- return -EINVAL;
+ return err;
+
if (user_val > 0xff) {
dev_err(&ab->i2c_client[0]->dev,
"debugfs error input > 0xff\n");
@@ -1002,7 +983,7 @@ static ssize_t ab3550_val_write(struct file *file,
if (err)
return -EINVAL;
- return buf_size;
+ return count;
}
static const struct file_operations ab3550_bank_fops = {
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index fc0c1af1566..387705e494b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -363,7 +363,7 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
}
}
-static struct resource ab8500_gpio_resources[] = {
+static struct resource __devinitdata ab8500_gpio_resources[] = {
{
.name = "GPIO_INT6",
.start = AB8500_INT_GPIO6R,
@@ -372,7 +372,7 @@ static struct resource ab8500_gpio_resources[] = {
}
};
-static struct resource ab8500_gpadc_resources[] = {
+static struct resource __devinitdata ab8500_gpadc_resources[] = {
{
.name = "HW_CONV_END",
.start = AB8500_INT_GP_HW_ADC_CONV_END,
@@ -387,7 +387,7 @@ static struct resource ab8500_gpadc_resources[] = {
},
};
-static struct resource ab8500_rtc_resources[] = {
+static struct resource __devinitdata ab8500_rtc_resources[] = {
{
.name = "60S",
.start = AB8500_INT_RTC_60S,
@@ -402,7 +402,7 @@ static struct resource ab8500_rtc_resources[] = {
},
};
-static struct resource ab8500_poweronkey_db_resources[] = {
+static struct resource __devinitdata ab8500_poweronkey_db_resources[] = {
{
.name = "ONKEY_DBF",
.start = AB8500_INT_PON_KEY1DB_F,
@@ -417,20 +417,47 @@ static struct resource ab8500_poweronkey_db_resources[] = {
},
};
-static struct resource ab8500_bm_resources[] = {
+static struct resource __devinitdata ab8500_av_acc_detect_resources[] = {
{
- .name = "MAIN_EXT_CH_NOT_OK",
- .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
- .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
- .flags = IORESOURCE_IRQ,
+ .name = "ACC_DETECT_1DB_F",
+ .start = AB8500_INT_ACC_DETECT_1DB_F,
+ .end = AB8500_INT_ACC_DETECT_1DB_F,
+ .flags = IORESOURCE_IRQ,
},
{
- .name = "BATT_OVV",
- .start = AB8500_INT_BATT_OVV,
- .end = AB8500_INT_BATT_OVV,
- .flags = IORESOURCE_IRQ,
+ .name = "ACC_DETECT_1DB_R",
+ .start = AB8500_INT_ACC_DETECT_1DB_R,
+ .end = AB8500_INT_ACC_DETECT_1DB_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ACC_DETECT_21DB_F",
+ .start = AB8500_INT_ACC_DETECT_21DB_F,
+ .end = AB8500_INT_ACC_DETECT_21DB_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ACC_DETECT_21DB_R",
+ .start = AB8500_INT_ACC_DETECT_21DB_R,
+ .end = AB8500_INT_ACC_DETECT_21DB_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ACC_DETECT_22DB_F",
+ .start = AB8500_INT_ACC_DETECT_22DB_F,
+ .end = AB8500_INT_ACC_DETECT_22DB_F,
+ .flags = IORESOURCE_IRQ,
},
{
+ .name = "ACC_DETECT_22DB_R",
+ .start = AB8500_INT_ACC_DETECT_22DB_R,
+ .end = AB8500_INT_ACC_DETECT_22DB_R,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource __devinitdata ab8500_charger_resources[] = {
+ {
.name = "MAIN_CH_UNPLUG_DET",
.start = AB8500_INT_MAIN_CH_UNPLUG_DET,
.end = AB8500_INT_MAIN_CH_UNPLUG_DET,
@@ -443,27 +470,27 @@ static struct resource ab8500_bm_resources[] = {
.flags = IORESOURCE_IRQ,
},
{
- .name = "VBUS_DET_F",
- .start = AB8500_INT_VBUS_DET_F,
- .end = AB8500_INT_VBUS_DET_F,
- .flags = IORESOURCE_IRQ,
- },
- {
.name = "VBUS_DET_R",
.start = AB8500_INT_VBUS_DET_R,
.end = AB8500_INT_VBUS_DET_R,
.flags = IORESOURCE_IRQ,
},
{
- .name = "BAT_CTRL_INDB",
- .start = AB8500_INT_BAT_CTRL_INDB,
- .end = AB8500_INT_BAT_CTRL_INDB,
+ .name = "VBUS_DET_F",
+ .start = AB8500_INT_VBUS_DET_F,
+ .end = AB8500_INT_VBUS_DET_F,
.flags = IORESOURCE_IRQ,
},
{
- .name = "CH_WD_EXP",
- .start = AB8500_INT_CH_WD_EXP,
- .end = AB8500_INT_CH_WD_EXP,
+ .name = "USB_LINK_STATUS",
+ .start = AB8500_INT_USB_LINK_STATUS,
+ .end = AB8500_INT_USB_LINK_STATUS,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGE_DET_DONE",
+ .start = AB8500_INT_USB_CHG_DET_DONE,
+ .end = AB8500_INT_USB_CHG_DET_DONE,
.flags = IORESOURCE_IRQ,
},
{
@@ -473,21 +500,60 @@ static struct resource ab8500_bm_resources[] = {
.flags = IORESOURCE_IRQ,
},
{
- .name = "NCONV_ACCU",
- .start = AB8500_INT_CCN_CONV_ACC,
- .end = AB8500_INT_CCN_CONV_ACC,
+ .name = "USB_CH_TH_PROT_R",
+ .start = AB8500_INT_USB_CH_TH_PROT_R,
+ .end = AB8500_INT_USB_CH_TH_PROT_R,
.flags = IORESOURCE_IRQ,
},
{
- .name = "LOW_BAT_F",
- .start = AB8500_INT_LOW_BAT_F,
- .end = AB8500_INT_LOW_BAT_F,
+ .name = "USB_CH_TH_PROT_F",
+ .start = AB8500_INT_USB_CH_TH_PROT_F,
+ .end = AB8500_INT_USB_CH_TH_PROT_F,
.flags = IORESOURCE_IRQ,
},
{
- .name = "LOW_BAT_R",
- .start = AB8500_INT_LOW_BAT_R,
- .end = AB8500_INT_LOW_BAT_R,
+ .name = "MAIN_EXT_CH_NOT_OK",
+ .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+ .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "MAIN_CH_TH_PROT_R",
+ .start = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .end = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "MAIN_CH_TH_PROT_F",
+ .start = AB8500_INT_MAIN_CH_TH_PROT_F,
+ .end = AB8500_INT_MAIN_CH_TH_PROT_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGER_NOT_OKR",
+ .start = AB8500_INT_USB_CHARGER_NOT_OK,
+ .end = AB8500_INT_USB_CHARGER_NOT_OK,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGER_NOT_OKF",
+ .start = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .end = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "CH_WD_EXP",
+ .start = AB8500_INT_CH_WD_EXP,
+ .end = AB8500_INT_CH_WD_EXP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource __devinitdata ab8500_btemp_resources[] = {
+ {
+ .name = "BAT_CTRL_INDB",
+ .start = AB8500_INT_BAT_CTRL_INDB,
+ .end = AB8500_INT_BAT_CTRL_INDB,
.flags = IORESOURCE_IRQ,
},
{
@@ -503,38 +569,55 @@ static struct resource ab8500_bm_resources[] = {
.flags = IORESOURCE_IRQ,
},
{
- .name = "USB_CHARGER_NOT_OKR",
- .start = AB8500_INT_USB_CHARGER_NOT_OK,
- .end = AB8500_INT_USB_CHARGER_NOT_OK,
+ .name = "BTEMP_LOW_MEDIUM",
+ .start = AB8500_INT_BTEMP_LOW_MEDIUM,
+ .end = AB8500_INT_BTEMP_LOW_MEDIUM,
.flags = IORESOURCE_IRQ,
},
{
- .name = "USB_CHARGE_DET_DONE",
- .start = AB8500_INT_USB_CHG_DET_DONE,
- .end = AB8500_INT_USB_CHG_DET_DONE,
+ .name = "BTEMP_MEDIUM_HIGH",
+ .start = AB8500_INT_BTEMP_MEDIUM_HIGH,
+ .end = AB8500_INT_BTEMP_MEDIUM_HIGH,
.flags = IORESOURCE_IRQ,
},
+};
+
+static struct resource __devinitdata ab8500_fg_resources[] = {
{
- .name = "USB_CH_TH_PROT_R",
- .start = AB8500_INT_USB_CH_TH_PROT_R,
- .end = AB8500_INT_USB_CH_TH_PROT_R,
+ .name = "NCONV_ACCU",
+ .start = AB8500_INT_CCN_CONV_ACC,
+ .end = AB8500_INT_CCN_CONV_ACC,
.flags = IORESOURCE_IRQ,
},
{
- .name = "MAIN_CH_TH_PROT_R",
- .start = AB8500_INT_MAIN_CH_TH_PROT_R,
- .end = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .name = "BATT_OVV",
+ .start = AB8500_INT_BATT_OVV,
+ .end = AB8500_INT_BATT_OVV,
.flags = IORESOURCE_IRQ,
},
{
- .name = "USB_CHARGER_NOT_OKF",
- .start = AB8500_INT_USB_CHARGER_NOT_OKF,
- .end = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .name = "LOW_BAT_F",
+ .start = AB8500_INT_LOW_BAT_F,
+ .end = AB8500_INT_LOW_BAT_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "LOW_BAT_R",
+ .start = AB8500_INT_LOW_BAT_R,
+ .end = AB8500_INT_LOW_BAT_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "CC_INT_CALIB",
+ .start = AB8500_INT_CC_INT_CALIB,
+ .end = AB8500_INT_CC_INT_CALIB,
.flags = IORESOURCE_IRQ,
},
};
-static struct resource ab8500_debug_resources[] = {
+static struct resource __devinitdata ab8500_chargalg_resources[] = {};
+
+static struct resource __devinitdata ab8500_debug_resources[] = {
{
.name = "IRQ_FIRST",
.start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
@@ -549,7 +632,7 @@ static struct resource ab8500_debug_resources[] = {
},
};
-static struct resource ab8500_usb_resources[] = {
+static struct resource __devinitdata ab8500_usb_resources[] = {
{
.name = "ID_WAKEUP_R",
.start = AB8500_INT_ID_WAKEUP_R,
@@ -580,9 +663,21 @@ static struct resource ab8500_usb_resources[] = {
.end = AB8500_INT_USB_LINK_STATUS,
.flags = IORESOURCE_IRQ,
},
+ {
+ .name = "USB_ADP_PROBE_PLUG",
+ .start = AB8500_INT_ADP_PROBE_PLUG,
+ .end = AB8500_INT_ADP_PROBE_PLUG,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_ADP_PROBE_UNPLUG",
+ .start = AB8500_INT_ADP_PROBE_UNPLUG,
+ .end = AB8500_INT_ADP_PROBE_UNPLUG,
+ .flags = IORESOURCE_IRQ,
+ },
};
-static struct resource ab8500_temp_resources[] = {
+static struct resource __devinitdata ab8500_temp_resources[] = {
{
.name = "AB8500_TEMP_WARM",
.start = AB8500_INT_TEMP_WARM,
@@ -591,7 +686,7 @@ static struct resource ab8500_temp_resources[] = {
},
};
-static struct mfd_cell ab8500_devs[] = {
+static struct mfd_cell __devinitdata ab8500_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
@@ -621,11 +716,33 @@ static struct mfd_cell ab8500_devs[] = {
.resources = ab8500_rtc_resources,
},
{
- .name = "ab8500-bm",
- .num_resources = ARRAY_SIZE(ab8500_bm_resources),
- .resources = ab8500_bm_resources,
+ .name = "ab8500-charger",
+ .num_resources = ARRAY_SIZE(ab8500_charger_resources),
+ .resources = ab8500_charger_resources,
+ },
+ {
+ .name = "ab8500-btemp",
+ .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
+ .resources = ab8500_btemp_resources,
+ },
+ {
+ .name = "ab8500-fg",
+ .num_resources = ARRAY_SIZE(ab8500_fg_resources),
+ .resources = ab8500_fg_resources,
+ },
+ {
+ .name = "ab8500-chargalg",
+ .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
+ .resources = ab8500_chargalg_resources,
+ },
+ {
+ .name = "ab8500-acc-det",
+ .num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
+ .resources = ab8500_av_acc_detect_resources,
+ },
+ {
+ .name = "ab8500-codec",
},
- { .name = "ab8500-codec", },
{
.name = "ab8500-usb",
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
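The single "ab8500-bm" cell is replaced above by dedicated charger, btemp, fg and chargalg cells, most of them carrying their own list of named IORESOURCE_IRQ entries. A child platform driver then looks its interrupts up by name rather than by index. A minimal sketch of that pattern, using the "BAT_CTRL_INDB" resource declared above and a hypothetical handler:

#include <linux/platform_device.h>
#include <linux/interrupt.h>

/* Hypothetical handler; a real driver would read the AB8500 status here. */
static irqreturn_t my_btemp_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int __devinit my_btemp_probe(struct platform_device *pdev)
{
	int irq, ret;

	/* Resolve the named IRQ resource supplied by the ab8500-btemp cell. */
	irq = platform_get_irq_byname(pdev, "BAT_CTRL_INDB");
	if (irq < 0)
		return irq;

	ret = request_threaded_irq(irq, NULL, my_btemp_isr, IRQF_ONESHOT,
				   "ab8500-btemp", pdev);
	if (ret)
		dev_err(&pdev->dev, "failed to request IRQ %d: %d\n", irq, ret);

	return ret;
}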
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 64748e42ac0..64bdeeb1c11 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -419,20 +419,13 @@ static ssize_t ab8500_bank_write(struct file *file,
size_t count, loff_t *ppos)
{
struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_bank;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_bank);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_bank);
if (err)
- return -EINVAL;
+ return err;
if (user_bank >= AB8500_NUM_BANKS) {
dev_err(dev, "debugfs error input > number of banks\n");
@@ -441,7 +434,7 @@ static ssize_t ab8500_bank_write(struct file *file,
debug_bank = user_bank;
- return buf_size;
+ return count;
}
static int ab8500_address_print(struct seq_file *s, void *p)
@@ -459,26 +452,20 @@ static ssize_t ab8500_address_write(struct file *file,
size_t count, loff_t *ppos)
{
struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_address;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_address);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_address);
if (err)
- return -EINVAL;
+ return err;
+
if (user_address > 0xff) {
dev_err(dev, "debugfs error input > 0xff\n");
return -EINVAL;
}
debug_address = user_address;
- return buf_size;
+ return count;
}
static int ab8500_val_print(struct seq_file *s, void *p)
@@ -509,20 +496,14 @@ static ssize_t ab8500_val_write(struct file *file,
size_t count, loff_t *ppos)
{
struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_val;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_val);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_val);
if (err)
- return -EINVAL;
+ return err;
+
if (user_val > 0xff) {
dev_err(dev, "debugfs error input > 0xff\n");
return -EINVAL;
@@ -534,7 +515,7 @@ static ssize_t ab8500_val_write(struct file *file,
return -EINVAL;
}
- return buf_size;
+ return count;
}
static const struct file_operations ab8500_bank_fops = {
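The three debugfs write handlers above now rely on kstrtoul_from_user(), which copies the user buffer, NUL-terminates it and parses it in a single call, so the handlers can drop their stack buffer and return the parser's own error code instead of collapsing every failure to -EINVAL. A minimal sketch of the resulting shape, with a hypothetical attribute and bound:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static unsigned long debug_foo;		/* hypothetical attribute */

static ssize_t foo_write(struct file *file, const char __user *user_buf,
			 size_t count, loff_t *ppos)
{
	unsigned long val;
	int err;

	/* Copy, terminate and parse the user string in one step. */
	err = kstrtoul_from_user(user_buf, count, 0, &val);
	if (err)
		return err;	/* e.g. -EINVAL or -ERANGE from the parser */

	if (val > 0xff)
		return -EINVAL;

	debug_foo = val;
	return count;		/* consume the whole write */
}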
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index a0bd0cf05af..21131c7b0f1 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -56,7 +56,7 @@ struct jz4740_adc {
void __iomem *base;
int irq;
- int irq_base;
+ struct irq_chip_generic *gc;
struct clk *clk;
atomic_t clk_ref;
@@ -64,63 +64,17 @@ struct jz4740_adc {
spinlock_t lock;
};
-static inline void jz4740_adc_irq_set_masked(struct jz4740_adc *adc, int irq,
- bool masked)
-{
- unsigned long flags;
- uint8_t val;
-
- irq -= adc->irq_base;
-
- spin_lock_irqsave(&adc->lock, flags);
-
- val = readb(adc->base + JZ_REG_ADC_CTRL);
- if (masked)
- val |= BIT(irq);
- else
- val &= ~BIT(irq);
- writeb(val, adc->base + JZ_REG_ADC_CTRL);
-
- spin_unlock_irqrestore(&adc->lock, flags);
-}
-
-static void jz4740_adc_irq_mask(struct irq_data *data)
-{
- struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
- jz4740_adc_irq_set_masked(adc, data->irq, true);
-}
-
-static void jz4740_adc_irq_unmask(struct irq_data *data)
-{
- struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
- jz4740_adc_irq_set_masked(adc, data->irq, false);
-}
-
-static void jz4740_adc_irq_ack(struct irq_data *data)
-{
- struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
- unsigned int irq = data->irq - adc->irq_base;
- writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS);
-}
-
-static struct irq_chip jz4740_adc_irq_chip = {
- .name = "jz4740-adc",
- .irq_mask = jz4740_adc_irq_mask,
- .irq_unmask = jz4740_adc_irq_unmask,
- .irq_ack = jz4740_adc_irq_ack,
-};
-
static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- struct jz4740_adc *adc = irq_desc_get_handler_data(desc);
+ struct irq_chip_generic *gc = irq_desc_get_handler_data(desc);
uint8_t status;
unsigned int i;
- status = readb(adc->base + JZ_REG_ADC_STATUS);
+ status = readb(gc->reg_base + JZ_REG_ADC_STATUS);
for (i = 0; i < 5; ++i) {
if (status & BIT(i))
- generic_handle_irq(adc->irq_base + i);
+ generic_handle_irq(gc->irq_base + i);
}
}
@@ -249,10 +203,12 @@ const struct mfd_cell jz4740_adc_cells[] = {
static int __devinit jz4740_adc_probe(struct platform_device *pdev)
{
- int ret;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
struct jz4740_adc *adc;
struct resource *mem_base;
- int irq;
+ int ret;
+ int irq_base;
adc = kmalloc(sizeof(*adc), GFP_KERNEL);
if (!adc) {
@@ -267,9 +223,9 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
goto err_free;
}
- adc->irq_base = platform_get_irq(pdev, 1);
- if (adc->irq_base < 0) {
- ret = adc->irq_base;
+ irq_base = platform_get_irq(pdev, 1);
+ if (irq_base < 0) {
+ ret = irq_base;
dev_err(&pdev->dev, "Failed to get irq base: %d\n", ret);
goto err_free;
}
@@ -309,20 +265,28 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, adc);
- for (irq = adc->irq_base; irq < adc->irq_base + 5; ++irq) {
- irq_set_chip_data(irq, adc);
- irq_set_chip_and_handler(irq, &jz4740_adc_irq_chip,
- handle_level_irq);
- }
+ gc = irq_alloc_generic_chip("INTC", 1, irq_base, adc->base,
+ handle_level_irq);
+
+ ct = gc->chip_types;
+ ct->regs.mask = JZ_REG_ADC_CTRL;
+ ct->regs.ack = JZ_REG_ADC_STATUS;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ ct->chip.irq_ack = irq_gc_ack;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
+
+ adc->gc = gc;
- irq_set_handler_data(adc->irq, adc);
+ irq_set_handler_data(adc->irq, gc);
irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux);
writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells,
- ARRAY_SIZE(jz4740_adc_cells), mem_base, adc->irq_base);
+ ARRAY_SIZE(jz4740_adc_cells), mem_base, irq_base);
if (ret < 0)
goto err_clk_put;
@@ -347,6 +311,8 @@ static int __devexit jz4740_adc_remove(struct platform_device *pdev)
mfd_remove_devices(&pdev->dev);
+ irq_remove_generic_chip(adc->gc, IRQ_MSK(5), IRQ_NOPROBE | IRQ_LEVEL, 0);
+ kfree(adc->gc);
irq_set_handler_data(adc->irq, NULL);
irq_set_chained_handler(adc->irq, NULL);
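The jz4740-adc conversion drops the driver's private mask/unmask/ack callbacks in favour of a generic irq chip: the mask and ack register offsets are declared once, the irq_gc_* helpers perform the MMIO, and the demux handler maps bit N of the status register to gc->irq_base + N. A condensed sketch of the same setup, with placeholder offsets and the helper names used by the driver above:

#include <linux/irq.h>
#include <linux/io.h>

#define MY_REG_MASK	0x08	/* placeholder: set bit = interrupt masked */
#define MY_REG_ACK	0x0c	/* placeholder: write 1 to acknowledge */

static void my_intc_setup(void __iomem *base, unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("my-intc", 1, irq_base, base,
				    handle_level_irq);
	if (!gc)
		return;

	ct = gc->chip_types;
	ct->regs.mask = MY_REG_MASK;
	ct->regs.ack = MY_REG_ACK;
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_ack = irq_gc_ack;

	/* Five level-triggered sources, excluded from autoprobing. */
	irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
}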
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index ea3f52c07ef..ea1169b0477 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -37,6 +37,9 @@
#define GPIOBASE 0x44
#define GPIO_IO_SIZE 64
+#define WDTBASE 0x84
+#define WDT_IO_SIZE 64
+
static struct resource smbus_sch_resource = {
.flags = IORESOURCE_IO,
};
@@ -59,6 +62,18 @@ static struct mfd_cell lpc_sch_cells[] = {
},
};
+static struct resource wdt_sch_resource = {
+ .flags = IORESOURCE_IO,
+};
+
+static struct mfd_cell tunnelcreek_cells[] = {
+ {
+ .name = "tunnelcreek_wdt",
+ .num_resources = 1,
+ .resources = &wdt_sch_resource,
+ },
+};
+
static struct pci_device_id lpc_sch_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
@@ -72,6 +87,7 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
unsigned int base_addr_cfg;
unsigned short base_addr;
int i;
+ int ret;
pci_read_config_dword(dev, SMBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
@@ -104,8 +120,39 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
lpc_sch_cells[i].id = id->device;
- return mfd_add_devices(&dev->dev, 0,
+ ret = mfd_add_devices(&dev->dev, 0,
lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, 0);
+ if (ret)
+ goto out_dev;
+
+ if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
+ pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
+ if (!(base_addr_cfg & (1 << 31))) {
+ dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
+ ret = -ENODEV;
+ goto out_dev;
+ }
+ base_addr = (unsigned short)base_addr_cfg;
+ if (base_addr == 0) {
+ dev_err(&dev->dev, "I/O space for WDT uninitialized\n");
+ ret = -ENODEV;
+ goto out_dev;
+ }
+
+ wdt_sch_resource.start = base_addr;
+ wdt_sch_resource.end = base_addr + WDT_IO_SIZE - 1;
+
+ for (i = 0; i < ARRAY_SIZE(tunnelcreek_cells); i++)
+ tunnelcreek_cells[i].id = id->device;
+
+ ret = mfd_add_devices(&dev->dev, 0, tunnelcreek_cells,
+ ARRAY_SIZE(tunnelcreek_cells), NULL, 0);
+ }
+
+ return ret;
+out_dev:
+ mfd_remove_devices(&dev->dev);
+ return ret;
}
static void __devexit lpc_sch_remove(struct pci_dev *dev)
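The Tunnel Creek watchdog cell above is only registered after two checks on the WDTBASE config dword: bit 31 must signal that the I/O range is decoded and the low 16 bits must hold a non-zero base; any failure unwinds the cells that were already added. A small sketch of that decode, reusing the WDTBASE and WDT_IO_SIZE values from the patch:

#include <linux/pci.h>
#include <linux/ioport.h>

#define MY_WDTBASE	0x84	/* as defined above */
#define MY_WDT_IO_SIZE	64	/* as defined above */

static int decode_wdt_resource(struct pci_dev *pdev, struct resource *res)
{
	u32 cfg;
	u16 base;

	pci_read_config_dword(pdev, MY_WDTBASE, &cfg);
	if (!(cfg & (1 << 31)))		/* I/O decode disabled */
		return -ENODEV;

	base = (u16)cfg;
	if (!base)			/* window left unprogrammed */
		return -ENODEV;

	res->flags = IORESOURCE_IO;
	res->start = base;
	res->end = base + MY_WDT_IO_SIZE - 1;
	return 0;
}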
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index 638bf7e4d3b..09274cf7c33 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -58,8 +58,6 @@ static struct i2c_client *get_i2c(struct max8997_dev *max8997,
default:
return ERR_PTR(-EINVAL);
}
-
- return ERR_PTR(-EINVAL);
}
struct max8997_irq_data {
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 9ec7570f5b8..de4096aee24 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -39,6 +39,8 @@ static struct mfd_cell max8998_devs[] = {
.name = "max8998-pmic",
}, {
.name = "max8998-rtc",
+ }, {
+ .name = "max8998-battery",
},
};
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 1717144fe7f..29601e7d606 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -998,9 +998,9 @@ static void usbhs_disable(struct device *dev)
if (is_omap_usbhs_rev2(omap)) {
if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_enable(omap->usbtll_p1_fck);
+ clk_disable(omap->usbtll_p1_fck);
if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_enable(omap->usbtll_p2_fck);
+ clk_disable(omap->usbtll_p2_fck);
clk_disable(omap->utmi_p2_fck);
clk_disable(omap->utmi_p1_fck);
}
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 7ab7746631d..2963689cf45 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -228,7 +228,7 @@ int stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
EXPORT_SYMBOL_GPL(stmpe_block_write);
/**
- * stmpe_set_altfunc: set the alternate function for STMPE pins
+ * stmpe_set_altfunc() - set the alternate function for STMPE pins
* @stmpe: Device to configure
* @pins: Bitmask of pins to affect
* @block: block to enable alternate functions for
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 0dbdc4e8cd7..e4ee3895658 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -42,6 +42,7 @@ struct stmpe_variant_block {
* @id_mask: bits valid in CHIPID register for comparison with id_val
* @num_gpios: number of GPIOS
* @af_bits: number of bits used to specify the alternate function
+ * @regs: variant-specific registers.
* @blocks: list of blocks present on this device
* @num_blocks: number of blocks present on this device
* @num_irqs: number of internal IRQs available on this device
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 2229e66d80d..6f5b8cf2f65 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -147,12 +147,11 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
if (init_data == NULL)
return -ENOMEM;
- init_data->irq = pmic_plat_data->irq;
- init_data->irq_base = pmic_plat_data->irq;
-
tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
- if (tps65910 == NULL)
+ if (tps65910 == NULL) {
+ kfree(init_data);
return -ENOMEM;
+ }
i2c_set_clientdata(i2c, tps65910);
tps65910->dev = &i2c->dev;
@@ -168,17 +167,22 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
goto err;
+ init_data->irq = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq;
+
tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
if (ret < 0)
goto err;
+ kfree(init_data);
return ret;
err:
mfd_remove_devices(tps65910->dev);
kfree(tps65910);
+ kfree(init_data);
return ret;
}
@@ -187,6 +191,7 @@ static int tps65910_i2c_remove(struct i2c_client *i2c)
struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
mfd_remove_devices(tps65910->dev);
+ tps65910_irq_exit(tps65910);
kfree(tps65910);
return 0;
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index 283ac675975..e7ff783aa31 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -157,6 +157,8 @@ static __devexit int tps65911_comparator_remove(struct platform_device *pdev)
struct tps65910 *tps65910;
tps65910 = dev_get_drvdata(pdev->dev.parent);
+ device_remove_file(&pdev->dev, &dev_attr_comp2_threshold);
+ device_remove_file(&pdev->dev, &dev_attr_comp1_threshold);
return 0;
}
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
new file mode 100644
index 00000000000..955bc00e4b2
--- /dev/null
+++ b/drivers/mfd/tps65912-core.c
@@ -0,0 +1,177 @@
+/*
+ * tps65912-core.c -- TI TPS65912x
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65912.h>
+
+static struct mfd_cell tps65912s[] = {
+ {
+ .name = "tps65912-pmic",
+ },
+};
+
+int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&tps65912->io_mutex);
+
+ err = tps65912->read(tps65912, reg, 1, &data);
+ if (err) {
+ dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
+ goto out;
+ }
+
+ data |= mask;
+ err = tps65912->write(tps65912, reg, 1, &data);
+ if (err)
+ dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps65912->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65912_set_bits);
+
+int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&tps65912->io_mutex);
+ err = tps65912->read(tps65912, reg, 1, &data);
+ if (err) {
+ dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
+ goto out;
+ }
+
+ data &= ~mask;
+ err = tps65912->write(tps65912, reg, 1, &data);
+ if (err)
+ dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps65912->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65912_clear_bits);
+
+static inline int tps65912_read(struct tps65912 *tps65912, u8 reg)
+{
+ u8 val;
+ int err;
+
+ err = tps65912->read(tps65912, reg, 1, &val);
+ if (err < 0)
+ return err;
+
+ return val;
+}
+
+static inline int tps65912_write(struct tps65912 *tps65912, u8 reg, u8 val)
+{
+ return tps65912->write(tps65912, reg, 1, &val);
+}
+
+int tps65912_reg_read(struct tps65912 *tps65912, u8 reg)
+{
+ int data;
+
+ mutex_lock(&tps65912->io_mutex);
+
+ data = tps65912_read(tps65912, reg);
+ if (data < 0)
+ dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps65912->io_mutex);
+ return data;
+}
+EXPORT_SYMBOL_GPL(tps65912_reg_read);
+
+int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val)
+{
+ int err;
+
+ mutex_lock(&tps65912->io_mutex);
+
+ err = tps65912_write(tps65912, reg, val);
+ if (err < 0)
+ dev_err(tps65912->dev, "Write for reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps65912->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65912_reg_write);
+
+int tps65912_device_init(struct tps65912 *tps65912)
+{
+ struct tps65912_board *pmic_plat_data = tps65912->dev->platform_data;
+ struct tps65912_platform_data *init_data;
+ int ret, dcdc_avs, value;
+
+ init_data = kzalloc(sizeof(struct tps65912_platform_data), GFP_KERNEL);
+ if (init_data == NULL)
+ return -ENOMEM;
+
+ init_data->irq = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq;
+
+ mutex_init(&tps65912->io_mutex);
+ dev_set_drvdata(tps65912->dev, tps65912);
+
+ dcdc_avs = (pmic_plat_data->is_dcdc1_avs << 0 |
+ pmic_plat_data->is_dcdc2_avs << 1 |
+ pmic_plat_data->is_dcdc3_avs << 2 |
+ pmic_plat_data->is_dcdc4_avs << 3);
+ if (dcdc_avs) {
+ tps65912->read(tps65912, TPS65912_I2C_SPI_CFG, 1, &value);
+ dcdc_avs |= value;
+ tps65912->write(tps65912, TPS65912_I2C_SPI_CFG, 1, &dcdc_avs);
+ }
+
+ ret = mfd_add_devices(tps65912->dev, -1,
+ tps65912s, ARRAY_SIZE(tps65912s),
+ NULL, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = tps65912_irq_init(tps65912, init_data->irq, init_data);
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ kfree(init_data);
+ mfd_remove_devices(tps65912->dev);
+ kfree(tps65912);
+ return ret;
+}
+
+void tps65912_device_exit(struct tps65912 *tps65912)
+{
+ mfd_remove_devices(tps65912->dev);
+ kfree(tps65912);
+}
+
+MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65912x chip family multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
new file mode 100644
index 00000000000..c041f2c3d2b
--- /dev/null
+++ b/drivers/mfd/tps65912-i2c.c
@@ -0,0 +1,139 @@
+/*
+ * tps65912-i2c.c -- I2C access for TI TPS65912x PMIC
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65912.h>
+
+static int tps65912_i2c_read(struct tps65912 *tps65912, u8 reg,
+ int bytes, void *dest)
+{
+ struct i2c_client *i2c = tps65912->control_data;
+ struct i2c_msg xfer[2];
+ int ret;
+
+ /* Write register */
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = bytes;
+ xfer[1].buf = dest;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+ return ret;
+}
+
+static int tps65912_i2c_write(struct tps65912 *tps65912, u8 reg,
+ int bytes, void *src)
+{
+ struct i2c_client *i2c = tps65912->control_data;
+ /* we add 1 byte for device register */
+ u8 msg[TPS6591X_MAX_REGISTER + 1];
+ int ret;
+
+ if (bytes > TPS6591X_MAX_REGISTER)
+ return -EINVAL;
+
+ msg[0] = reg;
+ memcpy(&msg[1], src, bytes);
+
+ ret = i2c_master_send(i2c, msg, bytes + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != bytes + 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int tps65912_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tps65912 *tps65912;
+
+ tps65912 = kzalloc(sizeof(struct tps65912), GFP_KERNEL);
+ if (tps65912 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, tps65912);
+ tps65912->dev = &i2c->dev;
+ tps65912->control_data = i2c;
+ tps65912->read = tps65912_i2c_read;
+ tps65912->write = tps65912_i2c_write;
+
+ return tps65912_device_init(tps65912);
+}
+
+static int tps65912_i2c_remove(struct i2c_client *i2c)
+{
+ struct tps65912 *tps65912 = i2c_get_clientdata(i2c);
+
+ tps65912_device_exit(tps65912);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps65912_i2c_id[] = {
+ {"tps65912", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps65912_i2c_id);
+
+static struct i2c_driver tps65912_i2c_driver = {
+ .driver = {
+ .name = "tps65912",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_i2c_probe,
+ .remove = tps65912_i2c_remove,
+ .id_table = tps65912_i2c_id,
+};
+
+static int __init tps65912_i2c_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&tps65912_i2c_driver);
+ if (ret != 0)
+ pr_err("Failed to register TPS65912 I2C driver: %d\n", ret);
+
+ return ret;
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps65912_i2c_init);
+
+static void __exit tps65912_i2c_exit(void)
+{
+ i2c_del_driver(&tps65912_i2c_driver);
+}
+module_exit(tps65912_i2c_exit);
+
+MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
new file mode 100644
index 00000000000..d360a83a273
--- /dev/null
+++ b/drivers/mfd/tps65912-irq.c
@@ -0,0 +1,224 @@
+/*
+ * tps65912-irq.c -- TI TPS6591x
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65912.h>
+
+static inline int irq_to_tps65912_irq(struct tps65912 *tps65912,
+ int irq)
+{
+ return irq - tps65912->irq_base;
+}
+
+/*
+ * This is a threaded IRQ handler, so it can access I2C/SPI. Since the
+ * IRQ handler explicitly clears the IRQs it handles, the IRQ line
+ * will be reasserted and the physical IRQ will be handled again if
+ * another interrupt is asserted while we run - in the normal course
+ * of events this is a rare occurrence, so we save I2C/SPI reads. We're
+ * also assuming that it's rare to get lots of interrupts firing
+ * simultaneously, so we try to minimise I/O.
+ */
+static irqreturn_t tps65912_irq(int irq, void *irq_data)
+{
+ struct tps65912 *tps65912 = irq_data;
+ u32 irq_sts;
+ u32 irq_mask;
+ u8 reg;
+ int i;
+
+
+ tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
+ irq_sts = reg;
+ tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
+ irq_sts |= reg << 8;
+ tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
+ irq_sts |= reg << 16;
+ tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
+ irq_sts |= reg << 24;
+
+ tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
+ irq_mask = reg;
+ tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
+ irq_mask |= reg << 8;
+ tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
+ irq_mask |= reg << 16;
+ tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
+ irq_mask |= reg << 24;
+
+ irq_sts &= ~irq_mask;
+ if (!irq_sts)
+ return IRQ_NONE;
+
+ for (i = 0; i < tps65912->irq_num; i++) {
+ if (!(irq_sts & (1 << i)))
+ continue;
+
+ handle_nested_irq(tps65912->irq_base + i);
+ }
+
+ /* Write the STS register back to clear IRQs we handled */
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
+ reg = irq_sts & 0xFF;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);
+
+ return IRQ_HANDLED;
+}
+
+static void tps65912_irq_lock(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&tps65912->irq_lock);
+}
+
+static void tps65912_irq_sync_unlock(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+ u32 reg_mask;
+ u8 reg;
+
+ tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
+ reg_mask = reg;
+ tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
+ reg_mask |= reg << 8;
+ tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
+ reg_mask |= reg << 16;
+ tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
+ reg_mask |= reg << 24;
+
+ if (tps65912->irq_mask != reg_mask) {
+ reg = tps65912->irq_mask & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK, 1, &reg);
+ reg = tps65912->irq_mask >> 8 & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK2, 1, &reg);
+ reg = tps65912->irq_mask >> 16 & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK3, 1, &reg);
+ reg = tps65912->irq_mask >> 24 & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK4, 1, &reg);
+ }
+
+ mutex_unlock(&tps65912->irq_lock);
+}
+
+static void tps65912_irq_enable(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+
+ tps65912->irq_mask &= ~(1 << irq_to_tps65912_irq(tps65912, data->irq));
+}
+
+static void tps65912_irq_disable(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+
+ tps65912->irq_mask |= (1 << irq_to_tps65912_irq(tps65912, data->irq));
+}
+
+static struct irq_chip tps65912_irq_chip = {
+ .name = "tps65912",
+ .irq_bus_lock = tps65912_irq_lock,
+ .irq_bus_sync_unlock = tps65912_irq_sync_unlock,
+ .irq_disable = tps65912_irq_disable,
+ .irq_enable = tps65912_irq_enable,
+};
+
+int tps65912_irq_init(struct tps65912 *tps65912, int irq,
+ struct tps65912_platform_data *pdata)
+{
+ int ret, cur_irq;
+ int flags = IRQF_ONESHOT;
+ u8 reg;
+
+ if (!irq) {
+ dev_warn(tps65912->dev, "No interrupt support, no core IRQ\n");
+ return 0;
+ }
+
+ if (!pdata || !pdata->irq_base) {
+ dev_warn(tps65912->dev, "No interrupt support, no IRQ base\n");
+ return 0;
+ }
+
+ /* Clear unattended interrupts */
+ tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
+ tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
+ tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
+ tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);
+
+ /* Mask top level interrupts */
+ tps65912->irq_mask = 0xFFFFFFFF;
+
+ mutex_init(&tps65912->irq_lock);
+ tps65912->chip_irq = irq;
+ tps65912->irq_base = pdata->irq_base;
+
+ tps65912->irq_num = TPS65912_NUM_IRQ;
+
+ /* Register with genirq */
+ for (cur_irq = tps65912->irq_base;
+ cur_irq < tps65912->irq_num + tps65912->irq_base;
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, tps65912);
+ irq_set_chip_and_handler(cur_irq, &tps65912_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+ /* ARM needs us to explicitly flag the IRQs as valid,
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, tps65912_irq, flags,
+ "tps65912", tps65912);
+
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+ if (ret != 0)
+ dev_err(tps65912->dev, "Failed to request IRQ: %d\n", ret);
+
+ return ret;
+}
+
+int tps65912_irq_exit(struct tps65912 *tps65912)
+{
+ free_irq(tps65912->chip_irq, tps65912);
+ return 0;
+}
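The threaded handler above reports every set-and-unmasked status bit as a nested interrupt at irq_base + bit, so a sub-driver simply requests its line from that range with a threaded handler of its own. A sketch, with a hypothetical bit position and handler:

#include <linux/interrupt.h>
#include <linux/mfd/tps65912.h>

#define MY_TPS65912_IRQ_OFFSET	0	/* placeholder bit position */

static irqreturn_t my_pmic_event_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_request_pmic_irq(struct tps65912 *tps65912)
{
	int irq = tps65912->irq_base + MY_TPS65912_IRQ_OFFSET;

	/* Nested threaded IRQ: only the threaded handler is supplied. */
	return request_threaded_irq(irq, NULL, my_pmic_event_handler,
				    IRQF_ONESHOT, "tps65912-event", tps65912);
}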
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
new file mode 100644
index 00000000000..6d71e0d2574
--- /dev/null
+++ b/drivers/mfd/tps65912-spi.c
@@ -0,0 +1,142 @@
+/*
+ * tps65912-spi.c -- SPI access for TI TPS65912x PMIC
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65912.h>
+
+static int tps65912_spi_write(struct tps65912 *tps65912, u8 addr,
+ int bytes, void *src)
+{
+ struct spi_device *spi = tps65912->control_data;
+ u8 *data = (u8 *) src;
+ int ret;
+ /* bit 23 is the read/write bit */
+ unsigned long spi_data = 1 << 23 | addr << 15 | *data;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ u32 tx_buf, rx_buf;
+
+ tx_buf = spi_data;
+ rx_buf = 0;
+
+ xfer.tx_buf = &tx_buf;
+ xfer.rx_buf = NULL;
+ xfer.len = sizeof(unsigned long);
+ xfer.bits_per_word = 24;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ ret = spi_sync(spi, &msg);
+ return ret;
+}
+
+static int tps65912_spi_read(struct tps65912 *tps65912, u8 addr,
+ int bytes, void *dest)
+{
+ struct spi_device *spi = tps65912->control_data;
+ /* bit 23 is the read/write bit */
+ unsigned long spi_data = 0 << 23 | addr << 15;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ int ret;
+ u8 *data = (u8 *) dest;
+ u32 tx_buf, rx_buf;
+
+ tx_buf = spi_data;
+ rx_buf = 0;
+
+ xfer.tx_buf = &tx_buf;
+ xfer.rx_buf = &rx_buf;
+ xfer.len = sizeof(unsigned long);
+ xfer.bits_per_word = 24;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ if (spi == NULL)
+ return 0;
+
+ ret = spi_sync(spi, &msg);
+ if (ret == 0)
+ *data = (u8) (rx_buf & 0xFF);
+ return ret;
+}
+
+static int __devinit tps65912_spi_probe(struct spi_device *spi)
+{
+ struct tps65912 *tps65912;
+
+ tps65912 = kzalloc(sizeof(struct tps65912), GFP_KERNEL);
+ if (tps65912 == NULL)
+ return -ENOMEM;
+
+ tps65912->dev = &spi->dev;
+ tps65912->control_data = spi;
+ tps65912->read = tps65912_spi_read;
+ tps65912->write = tps65912_spi_write;
+
+ spi_set_drvdata(spi, tps65912);
+
+ return tps65912_device_init(tps65912);
+}
+
+static int __devexit tps65912_spi_remove(struct spi_device *spi)
+{
+ struct tps65912 *tps65912 = spi_get_drvdata(spi);
+
+ tps65912_device_exit(tps65912);
+
+ return 0;
+}
+
+static struct spi_driver tps65912_spi_driver = {
+ .driver = {
+ .name = "tps65912",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_spi_probe,
+ .remove = __devexit_p(tps65912_spi_remove),
+};
+
+static int __init tps65912_spi_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&tps65912_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register TPS65912 SPI driver: %d\n", ret);
+
+ return ret;
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps65912_spi_init);
+
+static void __exit tps65912_spi_exit(void)
+{
+ spi_unregister_driver(&tps65912_spi_driver);
+}
+module_exit(tps65912_spi_exit);
+
+MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("SPI support for TPS65912 chip family mfd");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index a2eddc70995..01ecfeee652 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1283,6 +1283,8 @@ static const struct i2c_device_id twl_ids[] = {
{ "tps65950", 0 }, /* catalog version of twl5030 */
{ "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */
{ "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */
+ { "tps65921", TPS_SUBSET }, /* fewer LDOs; no codec, no LED
+ and vibrator. Charger in USB module*/
{ "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */
{ "twl6025", TWL6030_CLASS | TWL6025_SUBCLASS }, /* "Phoenix lite" */
{ /* end of list */ },
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 3941ddcf15f..b5d598c3aa7 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -530,13 +530,13 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
if (ret) {
dev_err(twl4030_madc->dev,
"unable to write sel register 0x%X\n", method->sel + 1);
- return ret;
+ goto out;
}
ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
if (ret) {
dev_err(twl4030_madc->dev,
"unable to write sel register 0x%X\n", method->sel + 1);
- return ret;
+ goto out;
}
/* Select averaging for all channels if do_avg is set */
if (req->do_avg) {
@@ -546,7 +546,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
dev_err(twl4030_madc->dev,
"unable to write avg register 0x%X\n",
method->avg + 1);
- return ret;
+ goto out;
}
ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
ch_lsb, method->avg);
@@ -554,7 +554,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
dev_err(twl4030_madc->dev,
"unable to write sel reg 0x%X\n",
method->sel + 1);
- return ret;
+ goto out;
}
}
if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
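The twl4030-madc hunks convert mid-function error returns into goto out so that whatever cleanup the function performs at its out: label (not shown in this hunk; presumably releasing the conversion lock) runs on every error path. The general shape of that fix:

#include <linux/mutex.h>
#include <linux/errno.h>

static int do_conversion(struct mutex *lock, int first_write_err)
{
	int ret;

	mutex_lock(lock);

	ret = first_write_err;	/* stands in for the real twl_i2c_write_u8() calls */
	if (ret)
		goto out;	/* was "return ret;", which skipped the cleanup */

	/* ... remainder of the conversion ... */
	ret = 0;
out:
	mutex_unlock(lock);
	return ret;
}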
diff --git a/drivers/mfd/twl6030-pwm.c b/drivers/mfd/twl6030-pwm.c
index 5d25bdc7842..e8fee147678 100644
--- a/drivers/mfd/twl6030-pwm.c
+++ b/drivers/mfd/twl6030-pwm.c
@@ -161,3 +161,5 @@ void pwm_free(struct pwm_device *pwm)
kfree(pwm);
}
EXPORT_SYMBOL(pwm_free);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
new file mode 100644
index 00000000000..87210954a06
--- /dev/null
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -0,0 +1,299 @@
+/*
+ * wm831x-auxadc.c -- AUXADC for Wolfson WM831x PMICs
+ *
+ * Copyright 2009-2011 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/core.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/pdata.h>
+#include <linux/mfd/wm831x/irq.h>
+#include <linux/mfd/wm831x/auxadc.h>
+#include <linux/mfd/wm831x/otp.h>
+#include <linux/mfd/wm831x/regulator.h>
+
+struct wm831x_auxadc_req {
+ struct list_head list;
+ enum wm831x_auxadc input;
+ int val;
+ struct completion done;
+};
+
+static int wm831x_auxadc_read_irq(struct wm831x *wm831x,
+ enum wm831x_auxadc input)
+{
+ struct wm831x_auxadc_req *req;
+ int ret;
+ bool ena = false;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ init_completion(&req->done);
+ req->input = input;
+ req->val = -ETIMEDOUT;
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ /* Enqueue the request */
+ list_add(&req->list, &wm831x->auxadc_pending);
+
+ ena = !wm831x->auxadc_active;
+
+ if (ena) {
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_ENA, WM831X_AUX_ENA);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ /* Enable the conversion if not already running */
+ if (!(wm831x->auxadc_active & (1 << input))) {
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE,
+ 1 << input, 1 << input);
+ if (ret != 0) {
+ dev_err(wm831x->dev,
+ "Failed to set AUXADC source: %d\n", ret);
+ goto out;
+ }
+
+ wm831x->auxadc_active |= 1 << input;
+ }
+
+ /* We convert at the fastest rate possible */
+ if (ena) {
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_CVT_ENA |
+ WM831X_AUX_RATE_MASK,
+ WM831X_AUX_CVT_ENA |
+ WM831X_AUX_RATE_MASK);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to start AUXADC: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ /* Wait for an interrupt */
+ wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ list_del(&req->list);
+ ret = req->val;
+
+out:
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ kfree(req);
+
+ return ret;
+}
+
+static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data)
+{
+ struct wm831x *wm831x = irq_data;
+ struct wm831x_auxadc_req *req;
+ int ret, input, val;
+
+ ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "Failed to read AUXADC data: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ input = ((ret & WM831X_AUX_DATA_SRC_MASK)
+ >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
+
+ if (input == 14)
+ input = WM831X_AUX_CAL;
+
+ val = ret & WM831X_AUX_DATA_MASK;
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ /* Disable this conversion, we're about to complete all users */
+ wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE,
+ 1 << input, 0);
+ wm831x->auxadc_active &= ~(1 << input);
+
+ /* Turn off the entire converter if idle */
+ if (!wm831x->auxadc_active)
+ wm831x_reg_write(wm831x, WM831X_AUXADC_CONTROL, 0);
+
+ /* Wake up any threads waiting for this request */
+ list_for_each_entry(req, &wm831x->auxadc_pending, list) {
+ if (req->input == input) {
+ req->val = val;
+ complete(&req->done);
+ }
+ }
+
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int wm831x_auxadc_read_polled(struct wm831x *wm831x,
+ enum wm831x_auxadc input)
+{
+ int ret, src, timeout;
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_ENA, WM831X_AUX_ENA);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret);
+ goto out;
+ }
+
+ /* We force a single source at present */
+ src = input;
+ ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE,
+ 1 << src);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret);
+ goto out;
+ }
+
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret);
+ goto disable;
+ }
+
+ /* No interrupt available in this path, so poll the
+ * interrupt status register for completion */
+ timeout = 5;
+ while (timeout) {
+ msleep(1);
+
+ ret = wm831x_reg_read(wm831x,
+ WM831X_INTERRUPT_STATUS_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "ISR 1 read failed: %d\n", ret);
+ goto disable;
+ }
+
+ /* Did it complete? */
+ if (ret & WM831X_AUXADC_DATA_EINT) {
+ wm831x_reg_write(wm831x,
+ WM831X_INTERRUPT_STATUS_1,
+ WM831X_AUXADC_DATA_EINT);
+ break;
+ } else {
+ dev_err(wm831x->dev,
+ "AUXADC conversion timeout\n");
+ ret = -EBUSY;
+ goto disable;
+ }
+ }
+
+ ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "Failed to read AUXADC data: %d\n", ret);
+ goto disable;
+ }
+
+ src = ((ret & WM831X_AUX_DATA_SRC_MASK)
+ >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
+
+ if (src == 14)
+ src = WM831X_AUX_CAL;
+
+ if (src != input) {
+ dev_err(wm831x->dev, "Data from source %d not %d\n",
+ src, input);
+ ret = -EINVAL;
+ } else {
+ ret &= WM831X_AUX_DATA_MASK;
+ }
+
+disable:
+ wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0);
+out:
+ mutex_unlock(&wm831x->auxadc_lock);
+ return ret;
+}
+
+/**
+ * wm831x_auxadc_read: Read a value from the WM831x AUXADC
+ *
+ * @wm831x: Device to read from.
+ * @input: AUXADC input to read.
+ */
+int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
+{
+ return wm831x->auxadc_read(wm831x, input);
+}
+EXPORT_SYMBOL_GPL(wm831x_auxadc_read);
+
+/**
+ * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC
+ *
+ * @wm831x: Device to read from.
+ * @input: AUXADC input to read.
+ */
+int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input)
+{
+ int ret;
+
+ ret = wm831x_auxadc_read(wm831x, input);
+ if (ret < 0)
+ return ret;
+
+ ret *= 1465;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv);
+
+void wm831x_auxadc_init(struct wm831x *wm831x)
+{
+ int ret;
+
+ mutex_init(&wm831x->auxadc_lock);
+ INIT_LIST_HEAD(&wm831x->auxadc_pending);
+
+ if (wm831x->irq && wm831x->irq_base) {
+ wm831x->auxadc_read = wm831x_auxadc_read_irq;
+
+ ret = request_threaded_irq(wm831x->irq_base +
+ WM831X_IRQ_AUXADC_DATA,
+ NULL, wm831x_auxadc_irq, 0,
+ "auxadc", wm831x);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n",
+ ret);
+ wm831x->auxadc_read = NULL;
+ }
+ }
+
+ if (!wm831x->auxadc_read)
+ wm831x->auxadc_read = wm831x_auxadc_read_polled;
+}
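With the AUXADC code split into its own file, the chosen backend (IRQ-driven queue or polled) sits behind wm831x->auxadc_read, and clients keep using the same two exported helpers; wm831x_auxadc_read_uv() scales the raw code by 1465 to microvolts. A caller sketch, where input is any of the enum wm831x_auxadc sources:

#include <linux/device.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/auxadc.h>

static int report_aux_input_uv(struct wm831x *wm831x, enum wm831x_auxadc input)
{
	int uv = wm831x_auxadc_read_uv(wm831x, input);

	if (uv < 0) {
		dev_err(wm831x->dev, "AUXADC read failed: %d\n", uv);
		return uv;
	}

	dev_info(wm831x->dev, "input %d reads %d uV\n", input, uv);
	return 0;
}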
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 265f75fc6a2..282e76ab678 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -295,7 +295,7 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
goto out;
r &= ~mask;
- r |= val;
+ r |= val & mask;
ret = wm831x_write(wm831x, reg, 2, &r);
@@ -306,146 +306,6 @@ out:
}
EXPORT_SYMBOL_GPL(wm831x_set_bits);
-/**
- * wm831x_auxadc_read: Read a value from the WM831x AUXADC
- *
- * @wm831x: Device to read from.
- * @input: AUXADC input to read.
- */
-int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
-{
- int ret, src, irq_masked, timeout;
-
- /* Are we using the interrupt? */
- irq_masked = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1_MASK);
- irq_masked &= WM831X_AUXADC_DATA_EINT;
-
- mutex_lock(&wm831x->auxadc_lock);
-
- ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
- WM831X_AUX_ENA, WM831X_AUX_ENA);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret);
- goto out;
- }
-
- /* We force a single source at present */
- src = input;
- ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE,
- 1 << src);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret);
- goto out;
- }
-
- /* Clear any notification from a very late arriving interrupt */
- try_wait_for_completion(&wm831x->auxadc_done);
-
- ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
- WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret);
- goto disable;
- }
-
- if (irq_masked) {
- /* If we're not using interrupts then poll the
- * interrupt status register */
- timeout = 5;
- while (timeout) {
- msleep(1);
-
- ret = wm831x_reg_read(wm831x,
- WM831X_INTERRUPT_STATUS_1);
- if (ret < 0) {
- dev_err(wm831x->dev,
- "ISR 1 read failed: %d\n", ret);
- goto disable;
- }
-
- /* Did it complete? */
- if (ret & WM831X_AUXADC_DATA_EINT) {
- wm831x_reg_write(wm831x,
- WM831X_INTERRUPT_STATUS_1,
- WM831X_AUXADC_DATA_EINT);
- break;
- } else {
- dev_err(wm831x->dev,
- "AUXADC conversion timeout\n");
- ret = -EBUSY;
- goto disable;
- }
- }
- } else {
- /* If we are using interrupts then wait for the
- * interrupt to complete. Use an extremely long
- * timeout to handle situations with heavy load where
- * the notification of the interrupt may be delayed by
- * threaded IRQ handling. */
- if (!wait_for_completion_timeout(&wm831x->auxadc_done,
- msecs_to_jiffies(500))) {
- dev_err(wm831x->dev, "Timed out waiting for AUXADC\n");
- ret = -EBUSY;
- goto disable;
- }
- }
-
- ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret);
- } else {
- src = ((ret & WM831X_AUX_DATA_SRC_MASK)
- >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
-
- if (src == 14)
- src = WM831X_AUX_CAL;
-
- if (src != input) {
- dev_err(wm831x->dev, "Data from source %d not %d\n",
- src, input);
- ret = -EINVAL;
- } else {
- ret &= WM831X_AUX_DATA_MASK;
- }
- }
-
-disable:
- wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0);
-out:
- mutex_unlock(&wm831x->auxadc_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(wm831x_auxadc_read);
-
-static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data)
-{
- struct wm831x *wm831x = irq_data;
-
- complete(&wm831x->auxadc_done);
-
- return IRQ_HANDLED;
-}
-
-/**
- * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC
- *
- * @wm831x: Device to read from.
- * @input: AUXADC input to read.
- */
-int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input)
-{
- int ret;
-
- ret = wm831x_auxadc_read(wm831x, input);
- if (ret < 0)
- return ret;
-
- ret *= 1465;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv);
-
static struct resource wm831x_dcdc1_resources[] = {
{
.start = WM831X_DC1_CONTROL_1,
@@ -872,6 +732,9 @@ static struct mfd_cell wm8310_devs[] = {
.resources = wm831x_dcdc4_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-epe",
.id = 1,
},
@@ -976,11 +839,6 @@ static struct mfd_cell wm8310_devs[] = {
.resources = wm831x_power_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1028,6 +886,9 @@ static struct mfd_cell wm8311_devs[] = {
.resources = wm831x_dcdc4_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-epe",
.id = 1,
},
@@ -1108,11 +969,6 @@ static struct mfd_cell wm8311_devs[] = {
.resources = wm831x_power_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1125,11 +981,6 @@ static struct mfd_cell wm8311_devs[] = {
.resources = wm831x_status2_resources,
},
{
- .name = "wm831x-touch",
- .num_resources = ARRAY_SIZE(wm831x_touch_resources),
- .resources = wm831x_touch_resources,
- },
- {
.name = "wm831x-watchdog",
.num_resources = ARRAY_SIZE(wm831x_wdt_resources),
.resources = wm831x_wdt_resources,
@@ -1165,6 +1016,9 @@ static struct mfd_cell wm8312_devs[] = {
.resources = wm831x_dcdc4_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-epe",
.id = 1,
},
@@ -1269,11 +1123,6 @@ static struct mfd_cell wm8312_devs[] = {
.resources = wm831x_power_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1286,11 +1135,6 @@ static struct mfd_cell wm8312_devs[] = {
.resources = wm831x_status2_resources,
},
{
- .name = "wm831x-touch",
- .num_resources = ARRAY_SIZE(wm831x_touch_resources),
- .resources = wm831x_touch_resources,
- },
- {
.name = "wm831x-watchdog",
.num_resources = ARRAY_SIZE(wm831x_wdt_resources),
.resources = wm831x_wdt_resources,
@@ -1326,6 +1170,9 @@ static struct mfd_cell wm8320_devs[] = {
.resources = wm8320_dcdc4_buck_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-gpio",
.num_resources = ARRAY_SIZE(wm831x_gpio_resources),
.resources = wm831x_gpio_resources,
@@ -1405,11 +1252,6 @@ static struct mfd_cell wm8320_devs[] = {
.resources = wm831x_on_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1428,6 +1270,22 @@ static struct mfd_cell wm8320_devs[] = {
},
};
+static struct mfd_cell touch_devs[] = {
+ {
+ .name = "wm831x-touch",
+ .num_resources = ARRAY_SIZE(wm831x_touch_resources),
+ .resources = wm831x_touch_resources,
+ },
+};
+
+static struct mfd_cell rtc_devs[] = {
+ {
+ .name = "wm831x-rtc",
+ .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
+ .resources = wm831x_rtc_resources,
+ },
+};
+
static struct mfd_cell backlight_devs[] = {
{
.name = "wm831x-backlight",
@@ -1440,14 +1298,12 @@ static struct mfd_cell backlight_devs[] = {
int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
{
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int rev;
+ int rev, wm831x_num;
enum wm831x_parent parent;
int ret, i;
mutex_init(&wm831x->io_lock);
mutex_init(&wm831x->key_lock);
- mutex_init(&wm831x->auxadc_lock);
- init_completion(&wm831x->auxadc_done);
dev_set_drvdata(wm831x->dev, wm831x);
ret = wm831x_reg_read(wm831x, WM831X_PARENT_ID);
@@ -1592,45 +1448,51 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
}
}
+ /* Multiply by 10 as we have many subdevices of the same type */
+ if (pdata && pdata->wm831x_num)
+ wm831x_num = pdata->wm831x_num * 10;
+ else
+ wm831x_num = -1;
+
ret = wm831x_irq_init(wm831x, irq);
if (ret != 0)
goto err;
- if (wm831x->irq_base) {
- ret = request_threaded_irq(wm831x->irq_base +
- WM831X_IRQ_AUXADC_DATA,
- NULL, wm831x_auxadc_irq, 0,
- "auxadc", wm831x);
- if (ret < 0)
- dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n",
- ret);
- }
+ wm831x_auxadc_init(wm831x);
/* The core device is up, instantiate the subdevices. */
switch (parent) {
case WM8310:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8310_devs, ARRAY_SIZE(wm8310_devs),
NULL, wm831x->irq_base);
break;
case WM8311:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8311_devs, ARRAY_SIZE(wm8311_devs),
NULL, wm831x->irq_base);
+ if (!pdata || !pdata->disable_touch)
+ mfd_add_devices(wm831x->dev, wm831x_num,
+ touch_devs, ARRAY_SIZE(touch_devs),
+ NULL, wm831x->irq_base);
break;
case WM8312:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8312_devs, ARRAY_SIZE(wm8312_devs),
NULL, wm831x->irq_base);
+ if (!pdata || !pdata->disable_touch)
+ mfd_add_devices(wm831x->dev, wm831x_num,
+ touch_devs, ARRAY_SIZE(touch_devs),
+ NULL, wm831x->irq_base);
break;
case WM8320:
case WM8321:
case WM8325:
case WM8326:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8320_devs, ARRAY_SIZE(wm8320_devs),
NULL, wm831x->irq_base);
break;
@@ -1645,9 +1507,30 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
goto err_irq;
}
+ /* The RTC can only be used if the 32.768kHz crystal is
+ * enabled; this can't be controlled by software at runtime.
+ */
+ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to read clock status: %d\n", ret);
+ goto err_irq;
+ }
+
+ if (ret & WM831X_XTAL_ENA) {
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
+ rtc_devs, ARRAY_SIZE(rtc_devs),
+ NULL, wm831x->irq_base);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
+ goto err_irq;
+ }
+ } else {
+ dev_info(wm831x->dev, "32.768kHz clock disabled, no RTC\n");
+ }
+
if (pdata && pdata->backlight) {
/* Treat errors as non-critical */
- ret = mfd_add_devices(wm831x->dev, -1, backlight_devs,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
ARRAY_SIZE(backlight_devs), NULL,
wm831x->irq_base);
if (ret < 0)
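Several of the behaviours above are now driven from platform data: wm831x_num (multiplied by 10) spaces out subdevice ids on systems with more than one wm831x, disable_touch suppresses the wm831x-touch cell, and the RTC cell is added only when the 32.768kHz crystal is enabled in hardware. An illustrative board fragment; field names follow the diff, values are hypothetical:

#include <linux/mfd/wm831x/pdata.h>

static struct wm831x_pdata board_wm831x_pdata = {
	.wm831x_num	= 1,	/* subdevice ids for this instance start at 10 */
	.disable_touch	= true,	/* no touch panel wired on this board */
	.irq_base	= 0,	/* 0: let wm831x_irq_init() allocate descriptors */
};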
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 42b928ec891..ada1835a545 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -348,6 +348,15 @@ static void wm831x_irq_sync_unlock(struct irq_data *data)
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
int i;
+ for (i = 0; i < ARRAY_SIZE(wm831x->gpio_update); i++) {
+ if (wm831x->gpio_update[i]) {
+ wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + i,
+ WM831X_GPN_INT_MODE | WM831X_GPN_POL,
+ wm831x->gpio_update[i]);
+ wm831x->gpio_update[i] = 0;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
/* If there's been a change in the mask write it back
* to the hardware. */
@@ -387,7 +396,7 @@ static void wm831x_irq_disable(struct irq_data *data)
static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
- int val, irq;
+ int irq;
irq = data->irq - wm831x->irq_base;
@@ -399,22 +408,30 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
return -EINVAL;
}
+ /* Rebase the IRQ into the GPIO range so we've got a sensible array
+ * index.
+ */
+ irq -= WM831X_IRQ_GPIO_1;
+
+ /* We set the high bit to flag that we need an update; don't
+ * do the update here as we can be called with the bus lock
+ * held.
+ */
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
- val = WM831X_GPN_INT_MODE;
+ wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
break;
case IRQ_TYPE_EDGE_RISING:
- val = WM831X_GPN_POL;
+ wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
break;
case IRQ_TYPE_EDGE_FALLING:
- val = 0;
+ wm831x->gpio_update[irq] = 0x10000;
break;
default:
return -EINVAL;
}
- return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + irq,
- WM831X_GPN_INT_MODE | WM831X_GPN_POL, val);
+ return 0;
}
static struct irq_chip wm831x_irq_chip = {
@@ -432,7 +449,7 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
{
struct wm831x *wm831x = data;
unsigned int i;
- int primary;
+ int primary, status_addr;
int status_regs[WM831X_NUM_IRQ_REGS] = { 0 };
int read[WM831X_NUM_IRQ_REGS] = { 0 };
int *status;
@@ -467,8 +484,9 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
/* Hopefully there should only be one register to read
* each time otherwise we ought to do a block read. */
if (!read[offset]) {
- *status = wm831x_reg_read(wm831x,
- irq_data_to_status_reg(&wm831x_irqs[i]));
+ status_addr = irq_data_to_status_reg(&wm831x_irqs[i]);
+
+ *status = wm831x_reg_read(wm831x, status_addr);
if (*status < 0) {
dev_err(wm831x->dev,
"Failed to read IRQ status: %d\n",
@@ -477,26 +495,21 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
}
read[offset] = 1;
+
+ /* Ignore any status bits for IRQs we think are masked */
+ *status &= ~wm831x->irq_masks_cur[offset];
+
+ /* Acknowledge now so we don't miss
+ * notifications while we handle.
+ */
+ wm831x_reg_write(wm831x, status_addr, *status);
}
- /* Report it if it isn't masked, or forget the status. */
- if ((*status & ~wm831x->irq_masks_cur[offset])
- & wm831x_irqs[i].mask)
+ if (*status & wm831x_irqs[i].mask)
handle_nested_irq(wm831x->irq_base + i);
- else
- *status &= ~wm831x_irqs[i].mask;
}
out:
- /* Touchscreen interrupts are handled specially in the driver */
- status_regs[0] &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
-
- for (i = 0; i < ARRAY_SIZE(status_regs); i++) {
- if (status_regs[i])
- wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1 + i,
- status_regs[i]);
- }
-
return IRQ_HANDLED;
}
@@ -515,13 +528,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
0xffff);
}
- if (!pdata || !pdata->irq_base) {
- dev_err(wm831x->dev,
- "No interrupt base specified, no interrupts\n");
+ /* Try to dynamically allocate IRQs if no base is specified */
+ if (!pdata || !pdata->irq_base)
+ wm831x->irq_base = -1;
+ else
+ wm831x->irq_base = pdata->irq_base;
+
+ wm831x->irq_base = irq_alloc_descs(wm831x->irq_base, 0,
+ WM831X_NUM_IRQS, 0);
+ if (wm831x->irq_base < 0) {
+ dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
+ wm831x->irq_base);
+ wm831x->irq_base = 0;
return 0;
}
- if (pdata->irq_cmos)
+ if (pdata && pdata->irq_cmos)
i = 0;
else
i = WM831X_IRQ_OD;
@@ -541,7 +563,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
}
wm831x->irq = irq;
- wm831x->irq_base = pdata->irq_base;
/* Register them with genirq */
for (cur_irq = wm831x->irq_base;
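The wm831x_irq_set_type() change above only records the requested edge in gpio_update[] (with a high "needs write-back" bit) and leaves the actual register write for later, because the callback can run with the bus lock held. A minimal standalone sketch of that record-now/write-later pattern follows; the names and register layout are hypothetical, not the wm831x API.

#include <stdio.h>

#define GPN_POL       (1 << 10)   /* hypothetical rising-edge polarity bit */
#define GPN_INT_MODE  (1 << 11)   /* hypothetical both-edges mode bit */
#define UPDATE_FLAG   0x10000     /* high bit: "needs write-back" */

static unsigned int pending[4];   /* one slot per GPIO IRQ */

/* Called from the trigger-type callback: just record the request. */
static void set_type_deferred(int gpio, int both_edges, int rising)
{
    unsigned int val = 0;

    if (both_edges)
        val = GPN_INT_MODE;
    else if (rising)
        val = GPN_POL;
    pending[gpio] = UPDATE_FLAG | val;
}

/* Called later, once the bus lock has been released: flush the writes. */
static void sync_pending(void)
{
    for (int i = 0; i < 4; i++) {
        if (!(pending[i] & UPDATE_FLAG))
            continue;
        printf("write GPIO%d control <- 0x%x\n", i, pending[i] & 0xffff);
        pending[i] = 0;
    }
}

int main(void)
{
    set_type_deferred(2, 0, 1);   /* rising edge on GPIO 2 */
    sync_pending();
    return 0;
}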
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index ed4b22a167b..8a1fafd0bf7 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -473,17 +473,13 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
{
int ret, cur_irq, i;
int flags = IRQF_ONESHOT;
+ int irq_base = -1;
if (!irq) {
dev_warn(wm8350->dev, "No interrupt support, no core IRQ\n");
return 0;
}
- if (!pdata || !pdata->irq_base) {
- dev_warn(wm8350->dev, "No interrupt support, no IRQ base\n");
- return 0;
- }
-
/* Mask top level interrupts */
wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF);
@@ -502,7 +498,17 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
wm8350->chip_irq = irq;
wm8350->irq_base = pdata->irq_base;
- if (pdata->irq_high) {
+ if (pdata && pdata->irq_base > 0)
+ irq_base = pdata->irq_base;
+
+ wm8350->irq_base = irq_alloc_descs(irq_base, 0, ARRAY_SIZE(wm8350_irqs), 0);
+ if (wm8350->irq_base < 0) {
+ dev_warn(wm8350->dev, "Allocating irqs failed with %d\n",
+ wm8350->irq_base);
+ return 0;
+ }
+
+ if (pdata && pdata->irq_high) {
flags |= IRQF_TRIGGER_HIGH;
wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index e198d40292e..96479c9b172 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -316,7 +316,7 @@ static int wm8994_suspend(struct device *dev)
static int wm8994_resume(struct device *dev)
{
struct wm8994 *wm8994 = dev_get_drvdata(dev);
- int ret;
+ int ret, i;
/* We may have lied to the PM core about suspending */
if (!wm8994->suspended)
@@ -329,10 +329,16 @@ static int wm8994_resume(struct device *dev)
return ret;
}
- ret = wm8994_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK,
- WM8994_NUM_IRQ_REGS * 2, &wm8994->irq_masks_cur);
- if (ret < 0)
- dev_err(dev, "Failed to restore interrupt masks: %d\n", ret);
+ /* Write one register at a time as the values are cached on the
+ * CPU and stored in native endian.
+ */
+ for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
+ ret = wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK
+ + i, wm8994->irq_masks_cur[i]);
+ if (ret < 0)
+ dev_err(dev, "Failed to restore interrupt masks: %d\n",
+ ret);
+ }
ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
&wm8994->ldo_regs);
@@ -403,7 +409,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
default:
BUG();
- return -EINVAL;
+ goto err;
}
wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
@@ -425,7 +431,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
default:
BUG();
- return -EINVAL;
+ goto err;
}
ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
@@ -476,13 +482,18 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err_enable;
}
- switch (ret) {
- case 0:
- case 1:
- if (wm8994->type == WM8994)
+ switch (wm8994->type) {
+ case WM8994:
+ switch (ret) {
+ case 0:
+ case 1:
dev_warn(wm8994->dev,
"revision %c not fully supported\n",
'A' + ret);
+ break;
+ default:
+ break;
+ }
break;
default:
break;
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 71c6e8f9aed..d682f7bd112 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -231,12 +231,6 @@ static irqreturn_t wm8994_irq_thread(int irq, void *data)
status[i] &= ~wm8994->irq_masks_cur[i];
}
- /* Report */
- for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
- if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
- handle_nested_irq(wm8994->irq_base + i);
- }
-
/* Ack any unmasked IRQs */
for (i = 0; i < ARRAY_SIZE(status); i++) {
if (status[i])
@@ -244,6 +238,12 @@ static irqreturn_t wm8994_irq_thread(int irq, void *data)
status[i]);
}
+ /* Report */
+ for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
+ if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
+ handle_nested_irq(wm8994->irq_base + i);
+ }
+
return IRQ_HANDLED;
}
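The wm8994 reordering above acknowledges the unmasked status bits before dispatching the nested handlers, so edges arriving while a (possibly slow) handler runs are not lost. A small sketch of that read/mask/ack/dispatch order, using stand-in register helpers rather than the wm8994 API:

#include <stdio.h>

#define NUM_IRQ_REGS 2

static unsigned int hw_status[NUM_IRQ_REGS]     = { 0x0005, 0x0000 };
static unsigned int irq_masks_cur[NUM_IRQ_REGS] = { 0x0004, 0x0000 };

static unsigned int reg_read(int i)            { return hw_status[i]; }
static void reg_write(int i, unsigned int v)   { hw_status[i] &= ~v; } /* write-1-to-clear */
static void handle_nested(int irq)             { printf("dispatch irq %d\n", irq); }

static void irq_thread(void)
{
    unsigned int status[NUM_IRQ_REGS];

    for (int i = 0; i < NUM_IRQ_REGS; i++) {
        /* 1. read, 2. drop masked bits, 3. ack immediately... */
        status[i] = reg_read(i) & ~irq_masks_cur[i];
        if (status[i])
            reg_write(i, status[i]);
    }

    /* ...and only then 4. run the nested handlers. */
    for (int i = 0; i < NUM_IRQ_REGS; i++)
        for (int bit = 0; bit < 16; bit++)
            if (status[i] & (1u << bit))
                handle_nested(i * 16 + bit);
}

int main(void) { irq_thread(); return 0; }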
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 5b0dba6d4ef..d724a18b528 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1989,14 +1989,20 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
return -EINVAL;
}
+ /*
+ * It's important to set the bp->state to a value different from
+ * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
+ * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
+ */
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+ smp_mb();
+
/* Stop Tx */
bnx2x_tx_disable(bp);
#ifdef BCM_CNIC
bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
- bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
- smp_mb();
bp->rx_mode = BNX2X_RX_MODE_NONE;
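The bnx2x_cmn.c hunk above publishes bp->state before disabling Tx, with smp_mb() in between, so a concurrent bnx2x_tx_int() running from NAPI context observes the closing state and does not restart the queue. In plain C11 the same publish/observe ordering looks roughly like the sketch below (simplified to release/acquire atomics, not the driver's types):

#include <stdatomic.h>
#include <stdio.h>

enum state { STATE_OPEN, STATE_CLOSING_WAIT4_HALT };

static _Atomic enum state bp_state = STATE_OPEN;

/* Unload path: publish the new state, then stop Tx. */
static void nic_unload(void)
{
    atomic_store_explicit(&bp_state, STATE_CLOSING_WAIT4_HALT,
                          memory_order_release);   /* plays the smp_mb() role */
    printf("tx disabled\n");
}

/* NAPI path: only restart Tx if the state is still OPEN. */
static void tx_int(void)
{
    if (atomic_load_explicit(&bp_state, memory_order_acquire) == STATE_OPEN)
        printf("restart tx queue\n");
    else
        printf("leave tx queue stopped\n");
}

int main(void)
{
    nic_unload();
    tx_int();
    return 0;
}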
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 06727f32e50..dc24de40e33 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -1204,6 +1204,8 @@ struct drv_port_mb {
#define LINK_STATUS_PFC_ENABLED 0x20000000
+ #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+
u32 port_stx;
u32 stat_nig_timer;
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index bcd8f003862..d45b1555a60 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1546,6 +1546,12 @@ static void bnx2x_umac_enable(struct link_params *params,
vars->line_speed);
break;
}
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
+
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+
REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
udelay(50);
@@ -1661,10 +1667,20 @@ static void bnx2x_xmac_disable(struct link_params *params)
{
u8 port = params->port;
struct bnx2x *bp = params->bp;
- u32 xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
if (REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC) {
+ /*
+ * Send an indication to change the state in the NIG back to XON.
+ * Clearing this bit ensures that the next time it is set it
+ * generates a rising edge.
+ */
+ pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl & ~(1<<1)));
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl | (1<<1)));
DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
usleep_range(1000, 1000);
@@ -1729,6 +1745,10 @@ static int bnx2x_emac_enable(struct link_params *params,
DP(NETIF_MSG_LINK, "enabling EMAC\n");
+ /* Disable BMAC */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
/* enable emac and not bmac */
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
@@ -2583,12 +2603,6 @@ static int bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
- if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) {
- REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LSS_STATUS,
- wb_data, 2);
- if (wb_data[0] > 0)
- return -ESRCH;
- }
return 0;
}
@@ -2654,16 +2668,6 @@ static int bnx2x_bmac2_enable(struct link_params *params,
udelay(30);
bnx2x_update_pfc_bmac2(params, vars, is_lb);
- if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) {
- REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LSS_STAT,
- wb_data, 2);
- if (wb_data[0] > 0) {
- DP(NETIF_MSG_LINK, "Got bad LSS status 0x%x\n",
- wb_data[0]);
- return -ESRCH;
- }
- }
-
return 0;
}
@@ -2949,7 +2953,9 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
u32 val;
u16 i;
int rc = 0;
-
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
/* address */
val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
@@ -3003,6 +3009,9 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
}
}
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
return rc;
}
@@ -3012,6 +3021,9 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
u32 tmp;
u8 i;
int rc = 0;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
/* address */
@@ -3065,7 +3077,9 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
}
}
-
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
return rc;
}
@@ -4353,6 +4367,9 @@ void bnx2x_link_status_update(struct link_params *params,
vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
vars->phy_flags = PHY_XGXS_FLAG;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
if (vars->link_up) {
DP(NETIF_MSG_LINK, "phy link up\n");
@@ -4444,6 +4461,8 @@ void bnx2x_link_status_update(struct link_params *params,
/* indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
}
/* Sync media type */
@@ -5903,20 +5922,30 @@ int bnx2x_set_led(struct link_params *params,
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
EMAC_WR(bp, EMAC_REG_EMAC_LED,
(tmp | EMAC_LED_OVERRIDE));
- return rc;
+ /*
+ * return here without enabling traffic
+ * LED blink and setting rate in ON mode.
+ * In oper mode, enabling LED blink
+ * and setting rate is needed.
+ */
+ if (mode == LED_MODE_ON)
+ return rc;
}
- } else if (SINGLE_MEDIA_DIRECT(params) &&
- (CHIP_IS_E1x(bp) ||
- CHIP_IS_E2(bp))) {
+ } else if (SINGLE_MEDIA_DIRECT(params)) {
/*
* This is a work-around for HW issue found when link
* is up in CL73
*/
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
- } else {
+ if (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp) ||
+ (mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ else
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ hw_led_mode);
+ } else
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
- }
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
@@ -6160,6 +6189,7 @@ static int bnx2x_update_link_down(struct link_params *params,
/* update shared memory */
vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG |
LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
@@ -6197,7 +6227,8 @@ static int bnx2x_update_link_up(struct link_params *params,
u8 port = params->port;
int rc = 0;
- vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_status |= (LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG);
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
@@ -7998,6 +8029,9 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+ /* Restart microcode to re-read the new mode */
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
}
@@ -8116,7 +8150,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].
config));
-
bnx2x_set_gpio_int(bp, gpio_num,
MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
gpio_port);
@@ -8125,8 +8158,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
* Disable transmit for this module
*/
phy->media_type = ETH_PHY_NOT_PRESENT;
- if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
- PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+ if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
+ CHIP_IS_E3(bp))
bnx2x_sfp_set_transmitter(params, phy, 0);
}
}
@@ -8228,9 +8262,6 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
u16 cnt, val, tmp1;
struct bnx2x *bp = params->bp;
- /* SPF+ PHY: Set flag to check for Tx error */
- vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
-
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
@@ -8414,9 +8445,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
- /* SPF+ PHY: Set flag to check for Tx error */
- vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
-
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
bnx2x_wait_reset_complete(bp, phy, params);
@@ -8585,9 +8613,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
- /* SPF+ PHY: Set flag to check for Tx error */
- vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
-
bnx2x_wait_reset_complete(bp, phy, params);
rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
/* Should be 0x6 to enable XS on Tx side. */
@@ -9243,7 +9268,13 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
if (phy->req_duplex == DUPLEX_FULL)
autoneg_val |= (1<<8);
- bnx2x_cl45_write(bp, phy,
+ /*
+ * Always write this if this is not 84833.
+ * For 84833, write it only when it's a forced speed.
+ */
+ if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ ((autoneg_val & (1<<12)) == 0))
+ bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
@@ -9257,13 +9288,12 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
0x3200);
- } else if (phy->req_line_speed != SPEED_10 &&
- phy->req_line_speed != SPEED_100) {
+ } else
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
1);
- }
+
/* Save spirom version */
bnx2x_save_848xx_spirom_version(phy, params);
@@ -9756,11 +9786,9 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_CTL_DEVAD,
0x400f, &val16);
- /* Put to low power mode on newer FW */
- if ((val16 & 0x303f) > 0x1009)
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x800);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, 0x800);
}
}
@@ -10191,8 +10219,15 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
u32 cfg_pin;
u8 port;
- /* This works with E3 only, no need to check the chip
- before determining the port. */
+ /*
+ * In case no EPIO is routed to reset the GPHY, put it
+ * in low power mode.
+ */
+ bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
+ /*
+ * This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
port = params->port;
cfg_pin = (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
@@ -10603,7 +10638,8 @@ static struct bnx2x_phy phy_warpcore = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
.addr = 0xff,
.def_md_devad = 0,
- .flags = FLAGS_HW_LOCK_REQUIRED,
+ .flags = (FLAGS_HW_LOCK_REQUIRED |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10729,7 +10765,8 @@ static struct bnx2x_phy phy_8706 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
.addr = 0xff,
.def_md_devad = 0,
- .flags = FLAGS_INIT_XGXS_FIRST,
+ .flags = (FLAGS_INIT_XGXS_FIRST |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10760,7 +10797,8 @@ static struct bnx2x_phy phy_8726 = {
.addr = 0xff,
.def_md_devad = 0,
.flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_INIT_XGXS_FIRST),
+ FLAGS_INIT_XGXS_FIRST |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10791,7 +10829,8 @@ static struct bnx2x_phy phy_8727 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
.addr = 0xff,
.def_md_devad = 0,
- .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .flags = (FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11112,6 +11151,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
*/
if (CHIP_REV(bp) == CHIP_REV_Ax)
phy->flags |= FLAGS_MDC_MDIO_WA;
+ else
+ phy->flags |= FLAGS_MDC_MDIO_WA_B0;
} else {
switch (switch_cfg) {
case SWITCH_CFG_1G:
@@ -11500,13 +11541,12 @@ void bnx2x_init_xmac_loopback(struct link_params *params,
* Set WC to loopback mode since link is required to provide clock
* to the XMAC in 20G mode
*/
- if (vars->line_speed == SPEED_20000) {
- bnx2x_set_aer_mmd(params, &params->phy[0]);
- bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
- params->phy[INT_PHY].config_loopback(
+ bnx2x_set_aer_mmd(params, &params->phy[0]);
+ bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
+ params->phy[INT_PHY].config_loopback(
&params->phy[INT_PHY],
params);
- }
+
bnx2x_xmac_enable(params, vars, 1);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
@@ -11684,12 +11724,16 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
if (reset_ext_phy) {
+ bnx2x_set_mdio_clk(bp, params->chip_id, port);
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
- if (params->phy[phy_index].link_reset)
+ if (params->phy[phy_index].link_reset) {
+ bnx2x_set_aer_mmd(params,
+ &params->phy[phy_index]);
params->phy[phy_index].link_reset(
&params->phy[phy_index],
params);
+ }
if (params->phy[phy_index].flags &
FLAGS_REARM_LATCH_SIGNAL)
clear_latch_ind = 1;
@@ -12178,10 +12222,6 @@ static void bnx2x_analyze_link_error(struct link_params *params,
u8 led_mode;
u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
- /*DP(NETIF_MSG_LINK, "CHECK LINK: %x half_open:%x-> lss:%x\n",
- vars->link_up,
- half_open_conn, lss_status);*/
-
if ((lss_status ^ half_open_conn) == 0)
return;
@@ -12194,6 +12234,7 @@ static void bnx2x_analyze_link_error(struct link_params *params,
* b. Update link_vars->link_up
*/
if (lss_status) {
+ DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
vars->link_status &= ~LINK_STATUS_LINK_UP;
vars->link_up = 0;
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
@@ -12203,6 +12244,7 @@ static void bnx2x_analyze_link_error(struct link_params *params,
*/
led_mode = LED_MODE_OFF;
} else {
+ DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
vars->link_status |= LINK_STATUS_LINK_UP;
vars->link_up = 1;
vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
@@ -12219,6 +12261,15 @@ static void bnx2x_analyze_link_error(struct link_params *params,
bnx2x_notify_link_changed(bp);
}
+/******************************************************************************
+* Description:
+* This function checks for a half-open connection change indication.
+* When such a change occurs, it calls bnx2x_analyze_link_error()
+* to check whether Remote Fault is set or cleared. Reception of a
+* remote fault status message in the MAC indicates that the peer's
+* MAC has detected a fault, for example due to a break in the TX
+* side of the fiber.
+*
+******************************************************************************/
static void bnx2x_check_half_open_conn(struct link_params *params,
struct link_vars *vars)
{
@@ -12229,9 +12280,28 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
return;
- if (!CHIP_IS_E3(bp) &&
+ if (CHIP_IS_E3(bp) &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))) {
+ (MISC_REGISTERS_RESET_REG_2_XMAC))) {
+ /* Check E3 XMAC */
+ /*
+ * Note that link speed cannot be queried here, since it may be
+ * zero while link is down. In case UMAC is active, LSS will
+ * simply not be set.
+ */
+ mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ /* Clear sticky bits (requires rising edge) */
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+ if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
+ lss_status = 1;
+
+ bnx2x_analyze_link_error(params, vars, lss_status);
+ } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
/* Check E1X / E2 BMAC */
u32 lss_status_reg;
u32 wb_data[2];
@@ -12253,14 +12323,20 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
+ u16 phy_idx;
if (!params) {
- DP(NETIF_MSG_LINK, "Ininitliazed params !\n");
+ DP(NETIF_MSG_LINK, "Uninitialized params !\n");
return;
}
- /* DP(NETIF_MSG_LINK, "Periodic called vars->phy_flags 0x%x speed 0x%x
- RESET_REG_2 0x%x\n", vars->phy_flags, vars->line_speed,
- REG_RD(bp, MISC_REG_RESET_REG_2)); */
- bnx2x_check_half_open_conn(params, vars);
+
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+ bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
+ bnx2x_check_half_open_conn(params, vars);
+ break;
+ }
+ }
+
if (CHIP_IS_E3(bp))
bnx2x_check_over_curr(params, vars);
}
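The E3 XMAC branch added above clears the latched LSS status by writing 0 and then the clear bits, because the hardware latches the clear on a rising edge of those bits. A minimal sketch of that write-0-then-write-1 sequence, against a hypothetical register model rather than the real XMAC block:

#include <stdio.h>

#define CLEAR_LOCAL_FAULT   (1u << 0)
#define CLEAR_REMOTE_FAULT  (1u << 1)

static unsigned int clear_reg;      /* latched-clear control register */
static unsigned int lss_status = 3; /* pretend both faults are latched */

/* Model: a 0 -> 1 transition of a clear bit wipes the matching status bit. */
static void write_clear(unsigned int val)
{
    unsigned int rising = val & ~clear_reg;

    lss_status &= ~rising;
    clear_reg = val;
}

int main(void)
{
    /* Drive the bits low first so the next write is guaranteed to be an edge. */
    write_clear(0);
    write_clear(CLEAR_LOCAL_FAULT | CLEAR_REMOTE_FAULT);

    printf("lss_status now 0x%x\n", lss_status);  /* prints 0x0 */
    return 0;
}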
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 6a7708d5da3..c12db6da213 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -145,6 +145,8 @@ struct bnx2x_phy {
#define FLAGS_SFP_NOT_APPROVED (1<<7)
#define FLAGS_MDC_MDIO_WA (1<<8)
#define FLAGS_DUMMY_READ (1<<9)
+#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
+#define FLAGS_TX_ERROR_CHECK (1<<12)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
@@ -276,7 +278,6 @@ struct link_vars {
#define PHY_PHYSICAL_LINK_FLAG (1<<2)
#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
#define PHY_OVER_CURRENT_FLAG (1<<4)
-#define PHY_TX_ERROR_CHECK_FLAG (1<<5)
u8 mac_type;
#define MAC_TYPE_NONE 0
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 02461fef875..27b5ecb1183 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -4771,9 +4771,11 @@
The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] -
header pointer. */
#define UCM_REG_XX_TABLE 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8)
#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4)
#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1)
#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
@@ -5622,8 +5624,9 @@
#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
-#define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
+#define EMAC_MDIO_STATUS_10MB (1L<<1)
#define EMAC_MODE_25G_MODE (1L<<5)
#define EMAC_MODE_HALF_DUPLEX (1L<<1)
#define EMAC_MODE_PORT_GMII (2L<<2)
@@ -5634,6 +5637,7 @@
#define EMAC_REG_EMAC_MAC_MATCH 0x10
#define EMAC_REG_EMAC_MDIO_COMM 0xac
#define EMAC_REG_EMAC_MDIO_MODE 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
#define EMAC_REG_EMAC_MODE 0x0
#define EMAC_REG_EMAC_RX_MODE 0xc8
#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c5f0f04219f..5548d464261 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -838,6 +838,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ E1000_WRITE_FLUSH();
msleep(10);
/* Test each interrupt */
@@ -856,6 +857,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, mask);
ew32(ICS, mask);
+ E1000_WRITE_FLUSH();
msleep(10);
if (adapter->test_icr & mask) {
@@ -873,6 +875,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMS, mask);
ew32(ICS, mask);
+ E1000_WRITE_FLUSH();
msleep(10);
if (!(adapter->test_icr & mask)) {
@@ -890,6 +893,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, ~mask & 0x00007FFF);
ew32(ICS, ~mask & 0x00007FFF);
+ E1000_WRITE_FLUSH();
msleep(10);
if (adapter->test_icr) {
@@ -901,6 +905,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ E1000_WRITE_FLUSH();
msleep(10);
/* Unhook test interrupt handler */
@@ -1394,6 +1399,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
if (unlikely(++k == txdr->count)) k = 0;
}
ew32(TDT, k);
+ E1000_WRITE_FLUSH();
msleep(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 1698622af43..8545c7aa93e 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -446,6 +446,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
/* Must reset the PHY before resetting the MAC */
if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ E1000_WRITE_FLUSH();
msleep(5);
}
@@ -3752,6 +3753,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(1);
}
@@ -3824,6 +3826,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
eecd &= ~E1000_EECD_SK; /* Lower SCK */
ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(hw->eeprom.delay_usec);
} else if (hw->eeprom.type == e1000_eeprom_microwire) {
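The e1000 hunks above, and the e1000e/igb/ixgb/ixgbe hunks that follow, all insert a flush (a dummy register read) between a control write and the following delay, forcing the posted PCI write out to the device before the driver starts timing. The shape of the pattern, as a userspace sketch with stand-in accessors and hypothetical register offsets:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t regs[64];                  /* stand-in for a mapped BAR */

static void reg_write(int off, uint32_t v) { regs[off] = v; }
static uint32_t reg_read(int off)          { return regs[off]; }

#define REG_IMC     0x00   /* hypothetical interrupt-mask-clear register */
#define REG_STATUS  0x02   /* any harmless register works for the flush */

static void mask_all_interrupts(void)
{
    reg_write(REG_IMC, 0xFFFFFFFF);
    /* Read-back flush: without this the write may still be sitting in a
     * posted-write buffer when the sleep below starts. */
    (void)reg_read(REG_STATUS);
    usleep(10000);
    printf("interrupts masked and flushed\n");
}

int main(void) { mask_all_interrupts(); return 0; }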
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index c0ecb2d9fdb..e4f42257c24 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1313,6 +1313,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
@@ -1347,6 +1348,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index cb1a3623253..06d88f316dc 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -28,8 +28,8 @@
/* ethtool support for e1000 */
-#include <linux/interrupt.h>
#include <linux/netdevice.h>
+#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -964,6 +964,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
usleep_range(10000, 20000);
/* Test each interrupt */
@@ -996,6 +997,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, mask);
ew32(ICS, mask);
+ e1e_flush();
usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
@@ -1014,6 +1016,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMS, mask);
ew32(ICS, mask);
+ e1e_flush();
usleep_range(10000, 20000);
if (!(adapter->test_icr & mask)) {
@@ -1032,6 +1035,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, ~mask & 0x00007FFF);
ew32(ICS, ~mask & 0x00007FFF);
+ e1e_flush();
usleep_range(10000, 20000);
if (adapter->test_icr) {
@@ -1043,6 +1047,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
usleep_range(10000, 20000);
/* Unhook test interrupt handler */
@@ -1276,6 +1281,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_FD); /* Force Duplex to FULL */
ew32(CTRL, ctrl_reg);
+ e1e_flush();
udelay(500);
return 0;
@@ -1418,6 +1424,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
*/
#define E1000_SERDES_LB_ON 0x410
ew32(SCTL, E1000_SERDES_LB_ON);
+ e1e_flush();
usleep_range(10000, 20000);
return 0;
@@ -1513,6 +1520,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
hw->phy.media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
ew32(SCTL, E1000_SERDES_LB_OFF);
+ e1e_flush();
usleep_range(10000, 20000);
break;
}
@@ -1592,6 +1600,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
k = 0;
}
ew32(TDT, k);
+ e1e_flush();
msleep(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index c1752124f3c..4e36978b8fd 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -283,6 +283,7 @@ static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
ew32(CTRL, ctrl);
+ e1e_flush();
udelay(10);
ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
ew32(CTRL, ctrl);
@@ -1230,9 +1231,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
ew32(CTRL, reg);
ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+ e1e_flush();
udelay(20);
ew32(CTRL, ctrl_reg);
ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
udelay(20);
out:
@@ -2134,8 +2137,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
ret_val = 0;
for (i = 0; i < words; i++) {
- if ((dev_spec->shadow_ram) &&
- (dev_spec->shadow_ram[offset+i].modified)) {
+ if (dev_spec->shadow_ram[offset+i].modified) {
data[i] = dev_spec->shadow_ram[offset+i].value;
} else {
ret_val = e1000_read_flash_word_ich8lan(hw,
@@ -3090,6 +3092,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ret_val = e1000_acquire_swflag_ich8lan(hw);
e_dbg("Issuing a global reset to ich8lan\n");
ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ /* cannot issue a flush here because it hangs the hardware */
msleep(20);
if (!ret_val)
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 65580b40594..7898a67d650 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1986,6 +1986,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
+ e1e_flush();
udelay(1);
/*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 4353ad56cf1..ab4be80f7ab 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -31,12 +31,12 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 2a6ee13285b..8666476cb9b 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -537,6 +537,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
@@ -609,6 +610,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index 7dcd65cede5..40407124e72 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -285,6 +285,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
wr32(E1000_EECD, eecd);
+ wrfl();
udelay(1);
timeout = NVM_MAX_RETRY_SPI;
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index ff244ce803c..414b0225be8 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1225,6 +1225,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
+ wrfl();
msleep(10);
/* Define all writable bits for ICS */
@@ -1268,6 +1269,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, mask);
wr32(E1000_ICS, mask);
+ wrfl();
msleep(10);
if (adapter->test_icr & mask) {
@@ -1289,6 +1291,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
+ wrfl();
msleep(10);
if (!(adapter->test_icr & mask)) {
@@ -1310,6 +1313,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, ~mask);
wr32(E1000_ICS, ~mask);
+ wrfl();
msleep(10);
if (adapter->test_icr & mask) {
@@ -1321,6 +1325,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
+ wrfl();
msleep(10);
/* Unhook test interrupt handler */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index dc599059512..40d4c405fd7 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1052,6 +1052,7 @@ msi_only:
kfree(adapter->vf_data);
adapter->vf_data = NULL;
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
msleep(100);
dev_info(&adapter->pdev->dev, "IOV Disabled\n");
}
@@ -2022,7 +2023,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (hw->bus.func == 0)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type == e1000_82580)
+ else if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
@@ -2198,6 +2199,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
kfree(adapter->vf_data);
adapter->vf_data = NULL;
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
msleep(100);
dev_info(&pdev->dev, "IOV Disabled\n");
}
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1330c8e932d..40ed066e3ef 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1226,6 +1226,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
/* disable transmits */
txdctl = er32(TXDCTL(0));
ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+ e1e_flush();
msleep(10);
/* Setup the HW Tx Head and Tail descriptor pointers */
@@ -1306,6 +1307,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
/* disable receives */
rxdctl = er32(RXDCTL(0));
ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+ e1e_flush();
msleep(10);
rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 954f6e938fb..8b1c3484d27 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2405,8 +2405,6 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
* addresses making a subsystem device table necessary.
*/
#ifdef CONFIG_PCI
-#define PCIID_VENDOR_INTEL 0x8086
-#define PCIID_VENDOR_ALI 0x10b9
static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
/*
* Subsystems needing entries:
@@ -2416,7 +2414,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
*/
{
/* Guessed entry */
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x103c,
.subdevice = 0x08bc,
@@ -2429,7 +2427,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "HP nx5000 family",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x103c,
.subdevice = 0x088c,
@@ -2443,7 +2441,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "HP nc8000 family",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x103c,
.subdevice = 0x0890,
@@ -2456,7 +2454,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "HP nc6000 family",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x0e11,
.subdevice = 0x0860,
@@ -2471,7 +2469,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
},
{
/* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */
- .vendor = PCIID_VENDOR_INTEL,
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = 0x24c0,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
@@ -2484,7 +2482,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801CAM ISA bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801CAM ISA bridge */
.device = 0x248c,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
@@ -2498,7 +2496,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
},
{
/* 82801DBM (ICH4-M) LPC Interface Bridge */
- .vendor = PCIID_VENDOR_INTEL,
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = 0x24cc,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
@@ -2512,7 +2510,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
},
{
/* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */
- .vendor = PCIID_VENDOR_ALI,
+ .vendor = PCI_VENDOR_ID_AL,
.device = 0x1533,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index c982ab9f900..38b362b6785 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -57,6 +57,7 @@ ixgb_raise_clock(struct ixgb_hw *hw,
*/
*eecd_reg = *eecd_reg | IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
@@ -75,6 +76,7 @@ ixgb_lower_clock(struct ixgb_hw *hw,
*/
*eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
@@ -112,6 +114,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
eecd_reg |= IXGB_EECD_DI;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
@@ -206,21 +209,25 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
/* Deselect EEPROM */
eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Clock high */
eecd_reg |= IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Select EEPROM */
eecd_reg |= IXGB_EECD_CS;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Clock low */
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
@@ -239,11 +246,13 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
/* Rising edge of clock */
eecd_reg |= IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Falling edge of clock */
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 6cb2e42ff4c..3d61a9e4faf 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -149,6 +149,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
*/
IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
+ IXGB_WRITE_FLUSH(hw);
msleep(IXGB_DELAY_BEFORE_RESET);
/* Issue a global reset to the MAC. This will reset the chip's
@@ -1220,6 +1221,7 @@ ixgb_optics_reset_bcm(struct ixgb_hw *hw)
ctrl &= ~IXGB_CTRL0_SDP2;
ctrl |= IXGB_CTRL0_SDP3;
IXGB_WRITE_REG(hw, CTRL0, ctrl);
+ IXGB_WRITE_FLUSH(hw);
/* SerDes needs extra delay */
msleep(IXGB_SUN_PHY_RESET_DELAY);
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 3b3dd4df4c5..34f30ec79c2 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -213,6 +213,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_tn:
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
break;
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 777051f54e5..fc1375f26fe 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -2632,6 +2632,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
usleep_range(10000, 20000);
}
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index dc649553a0a..82d4244c6e1 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1378,6 +1378,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Disable all the interrupts */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
/* Test each interrupt */
@@ -1398,6 +1399,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
~mask & 0x00007FFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
@@ -1415,6 +1417,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
if (!(adapter->test_icr &mask)) {
@@ -1435,6 +1438,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
~mask & 0x00007FFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
if (adapter->test_icr) {
@@ -1446,6 +1450,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Disable all the interrupts */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
/* Unhook test interrupt handler */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 1be617545dc..e86297b3273 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -184,6 +184,7 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+ IXGBE_WRITE_FLUSH(hw);
/* take a breather then clean up driver data */
msleep(100);
@@ -1005,7 +1006,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
unsigned long event = *(unsigned long *)data;
- if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+ if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
return 0;
switch (event) {
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 735f686c3b3..f7ca3511b9f 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1585,6 +1585,7 @@ static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
*i2cctl |= IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
udelay(IXGBE_I2C_T_RISE);
@@ -1605,6 +1606,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
*i2cctl &= ~IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
udelay(IXGBE_I2C_T_FALL);
@@ -1628,6 +1630,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
*i2cctl &= ~IXGBE_I2C_DATA_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index bec30ed91ad..2696c78e9f4 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -162,6 +162,7 @@ mac_reset_top:
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
msleep(50);
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 0fcdc25699d..dc4e305a108 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -322,6 +322,9 @@ static void macb_tx(struct macb *bp)
for (i = 0; i < TX_RING_SIZE; i++)
bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ /* Add wrap bit */
+ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
/* free transmit buffer in upper layer*/
for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
struct ring_info *rp = &bp->tx_skb[tail];
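The macb fix above re-applies the TX_WRAP bit after marking every descriptor used; otherwise the reinitialised ring loses its wrap marker and the controller would run past the end of the ring. A standalone sketch of reinitialising a descriptor ring while preserving the wrap bit on the last entry, with a hypothetical bit layout:

#include <stdio.h>

#define TX_RING_SIZE 8
#define BIT_TX_USED  (1u << 31)
#define BIT_TX_WRAP  (1u << 30)   /* must stay set on the last descriptor */

struct tx_desc { unsigned int ctrl; };

static struct tx_desc tx_ring[TX_RING_SIZE];

static void reinit_tx_ring(void)
{
    for (int i = 0; i < TX_RING_SIZE; i++)
        tx_ring[i].ctrl = BIT_TX_USED;

    /* The blanket init above wiped the wrap marker; put it back. */
    tx_ring[TX_RING_SIZE - 1].ctrl |= BIT_TX_WRAP;
}

int main(void)
{
    reinit_tx_ring();
    printf("last ctrl = 0x%x\n", tx_ring[TX_RING_SIZE - 1].ctrl);
    return 0;
}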
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index 5e710917806..5ada5b46911 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -128,7 +128,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
- context->n_mac = 0x7;
+ context->n_mac = 0x2;
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
base_qpn);
context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index c94b3426d35..f0ee35df4dd 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1117,6 +1117,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->port = port;
mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table);
+ info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ (port - 1) * (1 << log_num_mac);
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
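The mlx4/main.c hunk above derives each port's base QPN from the reserved Ethernet QP region plus a per-port stride of 1 << log_num_mac. A worked example with assumed illustrative values (a reserved base of 0x100 and log_num_mac = 3) gives port 1 a base of 0x100 and port 2 a base of 0x108:

#include <stdio.h>

int main(void)
{
    unsigned int reserved_base = 0x100;  /* assumed value for illustration */
    unsigned int log_num_mac   = 3;

    for (int port = 1; port <= 2; port++) {
        unsigned int base_qpn =
            reserved_base + (port - 1) * (1u << log_num_mac);
        printf("port %d base_qpn 0x%x\n", port, base_qpn);
    }
    return 0;
}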
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 1f95afda684..609e0ec14ce 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -258,9 +258,12 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
if (validate_index(dev, table, index))
goto out;
- table->entries[index] = 0;
- mlx4_set_port_mac_table(dev, port, table->entries);
- --table->total;
+ /* Check whether this address has reference count */
+ if (!(--table->refs[index])) {
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
+ }
out:
mutex_unlock(&table->mutex);
}
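The mlx4_unregister_mac() change above only clears a MAC table slot once its reference count drops to zero, so a MAC shared by several QPs stays programmed until the last user releases it. A minimal sketch of that refcounted release, with a hypothetical table layout rather than the driver's structures:

#include <stdio.h>

#define TABLE_SIZE 8

static unsigned long long entries[TABLE_SIZE];
static int refs[TABLE_SIZE];
static int total;

static void register_mac(int index, unsigned long long mac)
{
    if (refs[index]++ == 0) {         /* first user programs the slot */
        entries[index] = mac;
        total++;
    }
}

static void unregister_mac(int index)
{
    if (--refs[index])                /* still referenced: keep the entry */
        return;
    entries[index] = 0;               /* last user: clear and push to HW */
    total--;
}

int main(void)
{
    register_mac(0, 0x001122334455ULL);
    register_mac(0, 0x001122334455ULL);
    unregister_mac(0);
    printf("after first release: refs=%d entry=%llx\n", refs[0], entries[0]);
    unregister_mac(0);
    printf("after last release:  refs=%d entry=%llx\n", refs[0], entries[0]);
    return 0;
}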
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index cd6c2317e29..ed47585a686 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9201,7 +9201,7 @@ static int __devinit niu_ldg_init(struct niu *np)
first_chan = 0;
for (i = 0; i < port; i++)
- first_chan += parent->rxchan_per_port[port];
+ first_chan += parent->rxchan_per_port[i];
num_chan = parent->rxchan_per_port[port];
for (i = first_chan; i < (first_chan + num_chan); i++) {
@@ -9217,7 +9217,7 @@ static int __devinit niu_ldg_init(struct niu *np)
first_chan = 0;
for (i = 0; i < port; i++)
- first_chan += parent->txchan_per_port[port];
+ first_chan += parent->txchan_per_port[i];
num_chan = parent->txchan_per_port[port];
for (i = first_chan; i < (first_chan + num_chan); i++) {
err = niu_ldg_assign_ldn(np, parent,
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 1cd9394c335..cffbc0373fa 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -809,7 +809,7 @@ static int smc91c92_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
char *name;
- int i, j, rev;
+ int i, rev, j = 0;
unsigned int ioaddr;
u_long mir;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7d9c650f395..02339b3352e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -239,6 +239,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
{ PCI_VENDOR_ID_LINKSYS, 0x1032,
@@ -1091,6 +1092,21 @@ rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
}
+struct exgmac_reg {
+ u16 addr;
+ u16 mask;
+ u32 val;
+};
+
+static void rtl_write_exgmac_batch(void __iomem *ioaddr,
+ const struct exgmac_reg *r, int len)
+{
+ while (len-- > 0) {
+ rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
+ r++;
+ }
+}
+
static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
{
u8 value = 0xff;
@@ -3116,6 +3132,18 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
RTL_W32(MAC0, low);
RTL_R32(MAC0);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ const struct exgmac_reg e[] = {
+ { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
+ { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
+ { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
+ { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
+ low >> 16 },
+ };
+
+ rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
+ }
+
RTL_W8(Cfg9346, Cfg9346_Lock);
spin_unlock_irq(&tp->lock);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 8ad7bfbaa3a..3c0f1312b39 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1825,6 +1825,16 @@ static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
}
+static int sis190_mac_addr(struct net_device *dev, void *p)
+{
+ int rc;
+
+ rc = eth_mac_addr(dev, p);
+ if (!rc)
+ sis190_init_rxfilter(dev);
+ return rc;
+}
+
static const struct net_device_ops sis190_netdev_ops = {
.ndo_open = sis190_open,
.ndo_stop = sis190_close,
@@ -1833,7 +1843,7 @@ static const struct net_device_ops sis190_netdev_ops = {
.ndo_tx_timeout = sis190_tx_timeout,
.ndo_set_multicast_list = sis190_set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = sis190_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sis190_netpoll,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index fd622a66ebb..a03336e086d 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -53,7 +53,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
-#define DRIVER_VERSION "01-June-2011"
+#define DRIVER_VERSION "04-Aug-2011"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -163,35 +163,8 @@ cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
-static int
-cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
- void *data, u16 flags, u16 *actlen, u16 timeout)
-{
- int err;
-
- err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
- usb_rcvctrlpipe(ctx->udev, 0) :
- usb_sndctrlpipe(ctx->udev, 0),
- req->bNotificationType, req->bmRequestType,
- req->wValue,
- req->wIndex, data,
- req->wLength, timeout);
-
- if (err < 0) {
- if (actlen)
- *actlen = 0;
- return err;
- }
-
- if (actlen)
- *actlen = err;
-
- return 0;
-}
-
static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
{
- struct usb_cdc_notification req;
u32 val;
u8 flags;
u8 iface_no;
@@ -200,14 +173,14 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
-
- err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
- if (err) {
+ err = usb_control_msg(ctx->udev,
+ usb_rcvctrlpipe(ctx->udev, 0),
+ USB_CDC_GET_NTB_PARAMETERS,
+ USB_TYPE_CLASS | USB_DIR_IN
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &ctx->ncm_parm,
+ sizeof(ctx->ncm_parm), 10000);
+ if (err < 0) {
pr_debug("failed GET_NTB_PARAMETERS\n");
return 1;
}
@@ -253,31 +226,26 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* inform device about NTB input size changes */
if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
-
- req.wLength = 8;
- ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
- ndp_in_sz.wNtbInMaxDatagrams =
- cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
- ndp_in_sz.wReserved = 0;
- err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
- 1000);
+ err = usb_control_msg(ctx->udev,
+ usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_NTB_INPUT_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &ndp_in_sz, 8, 1000);
} else {
__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
-
- req.wLength = 4;
- err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
- NULL, 1000);
+ err = usb_control_msg(ctx->udev,
+ usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_NTB_INPUT_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &dwNtbInMaxSize, 4, 1000);
}
- if (err)
+ if (err < 0)
pr_debug("Setting NTB Input Size failed\n");
}
@@ -332,29 +300,24 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* set CRC Mode */
if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_CRC_MODE;
- req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 0;
-
- err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
- if (err)
+ err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_CRC_MODE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_CRC_NOT_APPENDED,
+ iface_no, NULL, 0, 1000);
+ if (err < 0)
pr_debug("Setting CRC mode off failed\n");
}
/* set NTB format, if both formats are supported */
if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
- req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 0;
-
- err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
- if (err)
+ err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS
+ | USB_DIR_OUT | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0, 1000);
+ if (err < 0)
pr_debug("Setting NTB format to 16-bit failed\n");
}
@@ -364,17 +327,13 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
__le16 max_datagram_size;
u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = cpu_to_le16(2);
-
- err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
- 1000);
- if (err) {
+ err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),
+ USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size,
+ 2, 1000);
+ if (err < 0) {
pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
CDC_NCM_MIN_DATAGRAM_SIZE);
} else {
@@ -395,17 +354,15 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
CDC_NCM_MIN_DATAGRAM_SIZE;
/* if value changed, update device */
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 2;
- max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
-
- err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
- 0, NULL, 1000);
- if (err)
+ err = usb_control_msg(ctx->udev,
+ usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0,
+ iface_no, &max_datagram_size,
+ 2, 1000);
+ if (err < 0)
pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
}
@@ -671,7 +628,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
u32 rem;
u32 offset;
u32 last_offset;
- u16 n = 0;
+ u16 n = 0, index;
u8 ready2send = 0;
/* if there is a remaining skb, it gets priority */
@@ -859,8 +816,8 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
- ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
- ctx->tx_ndp_modulus);
+ index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
+ ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index);
memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
ctx->tx_seq++;
@@ -873,12 +830,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
- memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
+ memcpy(((u8 *)skb_out->data) + index,
&(ctx->tx_ncm.ndp16),
sizeof(ctx->tx_ncm.ndp16));
- memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
- sizeof(ctx->tx_ncm.ndp16),
+ memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16),
&(ctx->tx_ncm.dpe16),
(ctx->tx_curr_frame_num + 1) *
sizeof(struct usb_cdc_ncm_dpe16));
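
The cdc_ncm hunks above drop the driver's private request wrapper in favour of direct usb_control_msg() calls, which is also why the error checks change from "if (err)" to "if (err < 0)": usb_control_msg() returns the number of bytes transferred on success and a negative errno on failure. A minimal sketch of that call pattern, reusing the <linux/usb/cdc.h> constants the patch itself uses (the function name is illustrative, not part of the driver):

#include <linux/usb.h>
#include <linux/usb/cdc.h>

/* Illustrative sketch: send the class-specific SET_CRC_MODE request to a
 * CDC NCM control interface without an intermediate notification struct. */
static int example_ncm_set_crc_mode(struct usb_device *udev, u16 iface_no)
{
	int ret;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      USB_CDC_SET_CRC_MODE,
			      USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
			      USB_CDC_NCM_CRC_NOT_APPENDED,	/* wValue */
			      iface_no,				/* wIndex */
			      NULL, 0, 1000);			/* no data stage */

	/* Bytes transferred on success, negative errno on failure. */
	return ret < 0 ? ret : 0;
}
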
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 9ff7c30573b..44d9d8d5649 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -309,11 +309,7 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
u8 i;
u32 val;
- if (ah->is_pciexpress != true)
- return;
-
- /* Do not touch SerDes registers */
- if (ah->config.pcie_powersave_enable == 2)
+ if (ah->is_pciexpress != true || ah->aspm_enabled != true)
return;
/* Nothing to do on restore for 11N */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 8efdec247c0..ad2bb2bf4e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -519,11 +519,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
int restore,
int power_off)
{
- if (ah->is_pciexpress != true)
- return;
-
- /* Do not touch SerDes registers */
- if (ah->config.pcie_powersave_enable == 2)
+ if (ah->is_pciexpress != true || ah->aspm_enabled != true)
return;
/* Nothing to do on restore for 11N */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8006ce0c735..8dcefe74f4c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -318,6 +318,14 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
}
+static void ath9k_hw_aspm_init(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (common->bus_ops->aspm_init)
+ common->bus_ops->aspm_init(common);
+}
+
/* This should work for all families including legacy */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
@@ -378,7 +386,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.additional_swba_backoff = 0;
ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
- ah->config.pcie_powersave_enable = 0;
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
@@ -598,7 +605,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (ah->is_pciexpress)
- ath9k_hw_configpcipowersave(ah, 0, 0);
+ ath9k_hw_aspm_init(ah);
else
ath9k_hw_disablepcie(ah);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 6acd0f975ae..c79889036ec 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -219,7 +219,6 @@ struct ath9k_ops_config {
int additional_swba_backoff;
int ack_6mb;
u32 cwm_ignore_extcca;
- u8 pcie_powersave_enable;
bool pcieSerDesWrite;
u8 pcie_clock_req;
u32 pcie_waen;
@@ -673,6 +672,7 @@ struct ath_hw {
bool sw_mgmt_crypto;
bool is_pciexpress;
+ bool aspm_enabled;
bool is_monitoring;
bool need_an_top2_fixup;
u16 tx_trig_level;
@@ -874,6 +874,7 @@ struct ath_bus_ops {
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
void (*extn_synch_en)(struct ath_common *common);
+ void (*aspm_init)(struct ath_common *common);
};
static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index ac5107172f9..aa0ff7e2c92 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -670,8 +670,10 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath9k_channel *curchan = ah->curchan;
+ ah->txchainmask = common->tx_chainmask;
if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 3bad0b2cf9a..be4ea132981 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -16,6 +16,7 @@
#include <linux/nl80211.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/ath9k_platform.h>
#include "ath9k.h"
@@ -115,12 +116,38 @@ static void ath_pci_extn_synch_enable(struct ath_common *common)
pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl);
}
+static void ath_pci_aspm_init(struct ath_common *common)
+{
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct pci_dev *pdev = to_pci_dev(sc->dev);
+ struct pci_dev *parent;
+ int pos;
+ u8 aspm;
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ parent = pdev->bus->self;
+ if (WARN_ON(!parent))
+ return;
+
+ pos = pci_pcie_cap(parent);
+ pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm);
+ if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
+ ah->aspm_enabled = true;
+ /* Initialize PCIe PM and SERDES registers. */
+ ath9k_hw_configpcipowersave(ah, 0, 0);
+ }
+}
+
static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath_pci_read_cachesize,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
.extn_synch_en = ath_pci_extn_synch_enable,
+ .aspm_init = ath_pci_aspm_init,
};
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
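
The new ath_pci_aspm_init() above only initializes the PCIe power-save (SerDes) registers when the upstream bridge actually has ASPM L0s or L1 enabled, replacing the old pcie_powersave_enable knob. A stripped-down sketch of that check, assuming the same <linux/pci-aspm.h> constants the patch includes (the helper name is made up):

#include <linux/pci.h>
#include <linux/pci-aspm.h>

/* Illustrative sketch: report whether the parent bridge has ASPM enabled. */
static bool example_parent_aspm_enabled(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	int pos;
	u8 lnkctl;

	if (!pci_is_pcie(pdev) || !parent)
		return false;

	pos = pci_pcie_cap(parent);
	pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &lnkctl);

	/* Bits 0 and 1 of Link Control are the L0s/L1 ASPM enables. */
	return lnkctl & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}
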
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index c052a0d5cbd..5441ad19511 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -648,6 +648,8 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
0x74c5e40d),
PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
0x4b801a17),
+ PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.02",
+ 0x4b74baa0),
PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
0x7a954bd9, 0x74be00c6),
PCMCIA_DEVICE_PROD_ID123(
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index dab67a12d73..73fe3cdf796 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -1746,7 +1746,11 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
}
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
+ /*
+ * We do not commit tx power settings while changing channels;
+ * do it now if the tx power changed.
+ */
+ iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
return 0;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index bd4b000733f..ecdc6e55742 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1235,7 +1235,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
iwl_legacy_print_rx_config_cmd(priv, ctx);
- goto set_tx_power;
+ /*
+ * We do not commit tx power settings while changing channels;
+ * do it now if the tx power changed.
+ */
+ iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
+ return 0;
}
/* If we are currently associated and the new config requires
@@ -1315,7 +1320,6 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
iwl4965_init_sensitivity(priv);
-set_tx_power:
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 3eeb12ebe6e..c95cefd529d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -365,6 +365,7 @@ static struct iwl_base_params iwl5000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
+ .no_idle_support = true,
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 3e6bb734dcb..02817a43855 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -135,6 +135,7 @@ struct iwl_mod_params {
* @temperature_kelvin: temperature report by uCode in kelvin
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadow register bit
+ * @no_idle_support: do not support idle mode
*/
struct iwl_base_params {
int eeprom_size;
@@ -156,6 +157,7 @@ struct iwl_base_params {
bool temperature_kelvin;
u32 max_event_log_size;
const bool shadow_reg_enable;
+ const bool no_idle_support;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index fb7e436b40c..69d4ec467dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -134,6 +134,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data)
{
bus->drv_data = drv_data;
+ pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data);
}
static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
@@ -454,8 +455,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
}
- pci_set_drvdata(pdev, bus);
-
bus->dev = &pdev->dev;
bus->irq = pdev->irq;
bus->ops = &pci_ops;
@@ -494,11 +493,12 @@ static void iwl_pci_down(struct iwl_bus *bus)
static void __devexit iwl_pci_remove(struct pci_dev *pdev)
{
- struct iwl_bus *bus = pci_get_drvdata(pdev);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+ void *bus_specific = priv->bus->bus_specific;
- iwl_remove(bus->drv_data);
+ iwl_remove(priv);
- iwl_pci_down(bus);
+ iwl_pci_down(bus_specific);
}
#ifdef CONFIG_PM
@@ -506,20 +506,20 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
static int iwl_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_bus *bus = pci_get_drvdata(pdev);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/
- return iwl_suspend(bus->drv_data);
+ return iwl_suspend(priv);
}
static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_bus *bus = pci_get_drvdata(pdev);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -532,7 +532,7 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- return iwl_resume(bus->drv_data);
+ return iwl_resume(priv);
}
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
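
The iwl-pci hunks above move pci_set_drvdata() into the bus layer's set_drv_data hook so that remove, suspend and resume can fetch the iwl_priv object straight from the pci_dev instead of going through the iwl_bus wrapper. The underlying drvdata pattern, sketched with a placeholder private type:

#include <linux/pci.h>

struct example_priv {
	int dummy;	/* placeholder for the driver's real state */
};

/* Illustrative sketch: stash and retrieve per-device driver state. */
static void example_attach(struct pci_dev *pdev, struct example_priv *priv)
{
	pci_set_drvdata(pdev, priv);
}

static struct example_priv *example_from_pdev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}
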
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 3ec619c6881..cd64df05f9e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -349,7 +349,8 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
if (priv->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
- else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
+ else if (!priv->cfg->base_params->no_idle_support &&
+ priv->hw->conf.flags & IEEE80211_CONF_IDLE)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (iwl_tt_is_low_power_state(priv)) {
/* in thermal throttling low power state */
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 3f7fc4a0b43..d7dbc00bcfb 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -239,7 +239,6 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
static const struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
- PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
@@ -272,6 +271,7 @@ static const struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
+ PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.01", 0xd27deb1a), /* Lucent Orinoco */
#ifdef CONFIG_HERMES_PRISM
/* Only entries that certainly identify Prism chipset */
PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
@@ -321,6 +321,9 @@ static const struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
+
+ /* This may be Agere or Intersil Firmware */
+ PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
#endif
PCMCIA_DEVICE_NULL,
};
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 84ab7d1acb6..ef67f6786a8 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -703,8 +703,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
/*
* Add space for the TXWI in front of the skb.
*/
- skb_push(entry->skb, TXWI_DESC_SIZE);
- memset(entry->skb, 0, TXWI_DESC_SIZE);
+ memset(skb_push(entry->skb, TXWI_DESC_SIZE), 0, TXWI_DESC_SIZE);
/*
* Register descriptor details in skb frame descriptor.
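
The rt2800 change above is more than a cleanup: skb_push() returns the new data pointer, so memset(skb_push(skb, len), 0, len) zeroes the freshly reserved headroom, whereas the removed memset(entry->skb, 0, TXWI_DESC_SIZE) appears to have been clearing the start of the sk_buff structure itself rather than the frame data. The idiom in isolation (illustrative helper name):

#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative sketch: reserve len bytes of headroom and zero them. */
static void example_prepend_zeroed_header(struct sk_buff *skb, unsigned int len)
{
	memset(skb_push(skb, len), 0, len);
}
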
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 15cdc7e57fc..4cdf247a870 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -355,7 +355,8 @@ static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *
return CIPHER_NONE;
}
-static inline void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
+static inline void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc)
{
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 8efab398352..4ccf2380597 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -113,7 +113,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* due to possible race conditions in mac80211.
*/
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- goto exit_fail;
+ goto exit_free_skb;
/*
* Use the ATIM queue if appropriate and present.
@@ -127,7 +127,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
ERROR(rt2x00dev,
"Attempt to send packet over invalid queue %d.\n"
"Please file bug report to %s.\n", qid, DRV_PROJECT);
- goto exit_fail;
+ goto exit_free_skb;
}
/*
@@ -159,6 +159,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
exit_fail:
rt2x00queue_pause_queue(queue);
+ exit_free_skb:
dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx);
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 5efd5783348..56f12358389 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1696,15 +1696,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
- /*find bridge info */
- pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
- for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
- if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
- pcipriv->ndis_adapter.pcibridge_vendor = tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Pci Bridge Vendor is found index: %d\n",
- tmp));
- break;
+ if (bridge_pdev) {
+ /*find bridge info if available */
+ pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
+ for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
+ if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
+ pcipriv->ndis_adapter.pcibridge_vendor = tmp;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Pci Bridge Vendor is found index:"
+ " %d\n", tmp));
+ break;
+ }
}
}
diff --git a/drivers/of/address.c b/drivers/of/address.c
index da1f4b9605d..72c33fbe451 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -610,6 +610,6 @@ void __iomem *of_iomap(struct device_node *np, int index)
if (of_address_to_resource(np, index, &res))
return NULL;
- return ioremap(res.start, 1 + res.end - res.start);
+ return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);
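
resource_size() is inclusive of both endpoints, evaluating to res->end - res->start + 1, which is exactly the open-coded "1 + res.end - res.start" it replaces above. A minimal sketch (the wrapper name is illustrative):

#include <linux/ioport.h>
#include <linux/io.h>

/* Illustrative sketch: map a memory resource using its inclusive length. */
static void __iomem *example_map(struct resource *res)
{
	return ioremap(res->start, resource_size(res));
}
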
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 02ed36719de..3ff22e32b60 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -610,8 +610,9 @@ EXPORT_SYMBOL(of_find_node_by_phandle);
*
* The out_value is modified only if a valid u32 value can be decoded.
*/
-int of_property_read_u32_array(const struct device_node *np, char *propname,
- u32 *out_values, size_t sz)
+int of_property_read_u32_array(const struct device_node *np,
+ const char *propname, u32 *out_values,
+ size_t sz)
{
struct property *prop = of_find_property(np, propname, NULL);
const __be32 *val;
@@ -645,7 +646,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u32_array);
*
* The out_string pointer is modified only if a valid string can be decoded.
*/
-int of_property_read_string(struct device_node *np, char *propname,
+int of_property_read_string(struct device_node *np, const char *propname,
const char **out_string)
{
struct property *prop = of_find_property(np, propname, NULL);
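
The of/base.c change above is a constification of the property-name arguments, so callers can pass string literals without casts. A typical call site looks roughly like this sketch (node contents and property names are invented):

#include <linux/kernel.h>
#include <linux/of.h>

/* Illustrative sketch: read a u32 and a string property from a node. */
static void example_read_props(struct device_node *np)
{
	u32 cells;
	const char *label;

	if (!of_property_read_u32_array(np, "#example-cells", &cells, 1))
		pr_info("#example-cells = %u\n", cells);

	if (!of_property_read_string(np, "label", &label))
		pr_info("label = %s\n", label);
}
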
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index 3007662ac61..ef0105fa52b 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -127,8 +127,8 @@ EXPORT_SYMBOL(of_gpio_count);
* gpio chips. This function performs only one sanity check: whether gpio
* is less than ngpios (that is specified in the gpio_chip).
*/
-static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
- const void *gpio_spec, u32 *flags)
+int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
+ const void *gpio_spec, u32 *flags)
{
const __be32 *gpio = gpio_spec;
const u32 n = be32_to_cpup(gpio);
@@ -152,6 +152,7 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
return n;
}
+EXPORT_SYMBOL(of_gpio_simple_xlate);
/**
* of_mm_gpiochip_add - Add memory mapped GPIO chip (bank)
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a70fa89f76f..220285760b6 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -110,7 +110,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
}
-static struct acpi_dock_ops acpiphp_dock_ops = {
+static const struct acpi_dock_ops acpiphp_dock_ops = {
.handler = handle_hotplug_event_func,
};
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c
index 4c3e94c0ae8..f56d7de7c75 100644
--- a/drivers/pcmcia/pxa2xx_balloon3.c
+++ b/drivers/pcmcia/pxa2xx_balloon3.c
@@ -103,22 +103,12 @@ static int balloon3_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void balloon3_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void balloon3_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level balloon3_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = balloon3_pcmcia_hw_init,
.hw_shutdown = balloon3_pcmcia_hw_shutdown,
.socket_state = balloon3_pcmcia_socket_state,
.configure_socket = balloon3_pcmcia_configure_socket,
- .socket_init = balloon3_pcmcia_socket_init,
- .socket_suspend = balloon3_pcmcia_socket_suspend,
.first = 0,
.nr = 1,
};
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
index 05913d0bbdb..63f4d5211ed 100644
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ b/drivers/pcmcia/pxa2xx_cm_x255.c
@@ -102,23 +102,12 @@ static int cmx255_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void cmx255_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void cmx255_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
-
static struct pcmcia_low_level cmx255_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = cmx255_pcmcia_hw_init,
.hw_shutdown = cmx255_pcmcia_shutdown,
.socket_state = cmx255_pcmcia_socket_state,
.configure_socket = cmx255_pcmcia_configure_socket,
- .socket_init = cmx255_pcmcia_socket_init,
- .socket_suspend = cmx255_pcmcia_socket_suspend,
.nr = 1,
};
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index 5662646b84d..6ee42b4c3e6 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -82,23 +82,12 @@ static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void cmx270_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void cmx270_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
-
static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = cmx270_pcmcia_hw_init,
.hw_shutdown = cmx270_pcmcia_shutdown,
.socket_state = cmx270_pcmcia_socket_state,
.configure_socket = cmx270_pcmcia_configure_socket,
- .socket_init = cmx270_pcmcia_socket_init,
- .socket_suspend = cmx270_pcmcia_socket_suspend,
.nr = 1,
};
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c
index 443cb7fc872..c6dec572a05 100644
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ b/drivers/pcmcia/pxa2xx_colibri.c
@@ -116,14 +116,6 @@ colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void colibri_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void colibri_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level colibri_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -135,9 +127,6 @@ static struct pcmcia_low_level colibri_pcmcia_ops = {
.socket_state = colibri_pcmcia_socket_state,
.configure_socket = colibri_pcmcia_configure_socket,
-
- .socket_init = colibri_pcmcia_socket_init,
- .socket_suspend = colibri_pcmcia_socket_suspend,
};
static struct platform_device *colibri_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 92016fe932b..aded706c0b9 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -128,22 +128,12 @@ static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return ret;
}
-static void mst_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void mst_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level mst_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = mst_pcmcia_hw_init,
.hw_shutdown = mst_pcmcia_hw_shutdown,
.socket_state = mst_pcmcia_socket_state,
.configure_socket = mst_pcmcia_configure_socket,
- .socket_init = mst_pcmcia_socket_init,
- .socket_suspend = mst_pcmcia_socket_suspend,
.nr = 2,
};
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c
index 69f73670949..d589ad1dcd4 100644
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ b/drivers/pcmcia/pxa2xx_palmld.c
@@ -65,14 +65,6 @@ static int palmld_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void palmld_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void palmld_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level palmld_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -84,9 +76,6 @@ static struct pcmcia_low_level palmld_pcmcia_ops = {
.socket_state = palmld_pcmcia_socket_state,
.configure_socket = palmld_pcmcia_configure_socket,
-
- .socket_init = palmld_pcmcia_socket_init,
- .socket_suspend = palmld_pcmcia_socket_suspend,
};
static struct platform_device *palmld_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
index d0ad6a76bbd..9c6a04b2f71 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -117,14 +117,6 @@ static int palmtc_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return ret;
}
-static void palmtc_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void palmtc_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level palmtc_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -136,9 +128,6 @@ static struct pcmcia_low_level palmtc_pcmcia_ops = {
.socket_state = palmtc_pcmcia_socket_state,
.configure_socket = palmtc_pcmcia_configure_socket,
-
- .socket_init = palmtc_pcmcia_socket_init,
- .socket_suspend = palmtc_pcmcia_socket_suspend,
};
static struct platform_device *palmtc_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
index 1a258045040..80645a688ee 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -67,14 +67,6 @@ palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void palmtx_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void palmtx_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level palmtx_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -86,9 +78,6 @@ static struct pcmcia_low_level palmtx_pcmcia_ops = {
.socket_state = palmtx_pcmcia_socket_state,
.configure_socket = palmtx_pcmcia_configure_socket,
-
- .socket_init = palmtx_pcmcia_socket_init,
- .socket_suspend = palmtx_pcmcia_socket_suspend,
};
static struct platform_device *palmtx_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
index d08802fe35f..939622251df 100644
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -28,7 +28,6 @@
#include "soc_common.h"
-#define SG2_S0_BUFF_CTL 120
#define SG2_S0_POWER_CTL 108
#define SG2_S0_GPIO_RESET 82
#define SG2_S0_GPIO_DETECT 53
@@ -38,6 +37,11 @@ static struct pcmcia_irqs irqs[] = {
{ 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" },
};
+static struct gpio sg2_pcmcia_gpios[] = {
+ { SG2_S0_GPIO_RESET, GPIOF_OUT_INIT_HIGH, "PCMCIA Reset" },
+ { SG2_S0_POWER_CTL, GPIOF_OUT_INIT_HIGH, "PCMCIA Power Ctrl" },
+};
+
static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY);
@@ -122,37 +126,23 @@ static int __init sg2_pcmcia_init(void)
if (!sg2_pcmcia_device)
return -ENOMEM;
- ret = gpio_request(SG2_S0_BUFF_CTL, "SG2 CF buff ctl");
+ ret = gpio_request_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
if (ret)
goto error_put_platform_device;
- ret = gpio_request(SG2_S0_POWER_CTL, "SG2 CF power ctl");
- if (ret)
- goto error_free_gpio_buff_ctl;
- ret = gpio_request(SG2_S0_GPIO_RESET, "SG2 CF reset");
- if (ret)
- goto error_free_gpio_power_ctl;
- /* Set gpio directions */
- gpio_direction_output(SG2_S0_BUFF_CTL, 0);
- gpio_direction_output(SG2_S0_POWER_CTL, 1);
- gpio_direction_output(SG2_S0_GPIO_RESET, 1);
ret = platform_device_add_data(sg2_pcmcia_device,
&sg2_pcmcia_ops,
sizeof(sg2_pcmcia_ops));
if (ret)
- goto error_free_gpio_reset;
+ goto error_free_gpios;
ret = platform_device_add(sg2_pcmcia_device);
if (ret)
- goto error_free_gpio_reset;
+ goto error_free_gpios;
return 0;
-error_free_gpio_reset:
- gpio_free(SG2_S0_GPIO_RESET);
-error_free_gpio_power_ctl:
- gpio_free(SG2_S0_POWER_CTL);
-error_free_gpio_buff_ctl:
- gpio_free(SG2_S0_BUFF_CTL);
+error_free_gpios:
+ gpio_free_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
error_put_platform_device:
platform_device_put(sg2_pcmcia_device);
@@ -162,9 +152,7 @@ error_put_platform_device:
static void __exit sg2_pcmcia_exit(void)
{
platform_device_unregister(sg2_pcmcia_device);
- gpio_free(SG2_S0_BUFF_CTL);
- gpio_free(SG2_S0_POWER_CTL);
- gpio_free(SG2_S0_GPIO_RESET);
+ gpio_free_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
}
fs_initcall(sg2_pcmcia_init);
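
The stargate2 conversion above folds three separate gpio_request()/gpio_direction_output() sequences into one table handled by gpio_request_array(), which also releases any already-claimed pins if a later request fails. The same pattern with made-up pin numbers and labels:

#include <linux/kernel.h>
#include <linux/gpio.h>

static struct gpio example_gpios[] = {
	{ 82,  GPIOF_OUT_INIT_HIGH, "Example Reset"      },
	{ 108, GPIOF_OUT_INIT_HIGH, "Example Power Ctrl" },
};

/* Illustrative sketch: claim all table entries, with automatic unwind. */
static int example_claim(void)
{
	return gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
}

static void example_release(void)
{
	gpio_free_array(example_gpios, ARRAY_SIZE(example_gpios));
}
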
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index a51f2077644..1064b1c2869 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -136,22 +136,12 @@ static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void viper_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void viper_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level viper_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = viper_pcmcia_hw_init,
.hw_shutdown = viper_pcmcia_hw_shutdown,
.socket_state = viper_pcmcia_socket_state,
.configure_socket = viper_pcmcia_configure_socket,
- .socket_init = viper_pcmcia_socket_init,
- .socket_suspend = viper_pcmcia_socket_suspend,
.nr = 1,
};
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 768f9572a8c..a0a9c2aa8d7 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -186,8 +186,8 @@ static int soc_common_pcmcia_sock_init(struct pcmcia_socket *sock)
struct soc_pcmcia_socket *skt = to_soc_pcmcia_socket(sock);
debug(skt, 2, "initializing socket\n");
-
- skt->ops->socket_init(skt);
+ if (skt->ops->socket_init)
+ skt->ops->socket_init(skt);
return 0;
}
@@ -207,7 +207,8 @@ static int soc_common_pcmcia_suspend(struct pcmcia_socket *sock)
debug(skt, 2, "suspending socket\n");
- skt->ops->socket_suspend(skt);
+ if (skt->ops->socket_suspend)
+ skt->ops->socket_suspend(skt);
return 0;
}
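
With soc_common now tolerating NULL socket_init and socket_suspend hooks, the empty stub functions deleted from the pxa2xx board files earlier in this series become unnecessary. The optional-callback shape in isolation, with placeholder names:

/* Illustrative sketch: a hook that boards may simply leave unset. */
struct example_ops {
	void (*socket_init)(void *ctx);
};

static void example_sock_init(const struct example_ops *ops, void *ctx)
{
	if (ops->socket_init)
		ops->socket_init(ctx);
}
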
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 45e0191c35d..1e88d478532 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -769,4 +769,12 @@ config INTEL_OAKTRAIL
enable/disable the Camera, WiFi, BT etc. devices. If in doubt, say Y
here; it will only load on supported platforms.
+config SAMSUNG_Q10
+ tristate "Samsung Q10 Extras"
+ depends on SERIO_I8042
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+ This driver provides support for backlight control on Samsung Q10
+ and related laptops, including Dell Latitude X200.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index afc1f832aa6..293a320d9fa 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
+obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index e1c4938b301..af2bb20cb2f 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -99,6 +99,7 @@ enum acer_wmi_event_ids {
static const struct key_entry acer_wmi_keymap[] = {
{KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
{KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */
+ {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */
{KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */
{KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
@@ -304,6 +305,10 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = {
.wireless = 2,
};
+static struct quirk_entry quirk_lenovo_ideapad_s205 = {
+ .wireless = 3,
+};
+
/* The Aspire One has a dummy ACPI-WMI interface - disable it */
static struct dmi_system_id __devinitdata acer_blacklist[] = {
{
@@ -450,6 +455,15 @@ static struct dmi_system_id acer_quirks[] = {
},
.driver_data = &quirk_medion_md_98300,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
{}
};
@@ -542,6 +556,12 @@ struct wmi_interface *iface)
return AE_ERROR;
*value = result & 0x1;
return AE_OK;
+ case 3:
+ err = ec_read(0x78, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result & 0x1;
+ return AE_OK;
default:
err = ec_read(0xA, &result);
if (err)
@@ -1266,8 +1286,13 @@ static void acer_rfkill_update(struct work_struct *ignored)
acpi_status status;
status = get_u32(&state, ACER_CAP_WIRELESS);
- if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(wireless_rfkill, !state);
+ if (ACPI_SUCCESS(status)) {
+ if (quirks->wireless == 3) {
+ rfkill_set_hw_state(wireless_rfkill, !state);
+ } else {
+ rfkill_set_sw_state(wireless_rfkill, !state);
+ }
+ }
if (has_cap(ACER_CAP_BLUETOOTH)) {
status = get_u32(&state, ACER_CAP_BLUETOOTH);
@@ -1400,6 +1425,9 @@ static ssize_t show_bool_threeg(struct device *dev,
{
u32 result; \
acpi_status status;
+
+ pr_info("This threeg sysfs will be removed in 2012"
+ " - used by: %s\n", current->comm);
if (wmi_has_guid(WMID_GUID3))
status = wmid3_get_device_status(&result,
ACER_WMID3_GDS_THREEG);
@@ -1415,8 +1443,10 @@ static ssize_t set_bool_threeg(struct device *dev,
{
u32 tmp = simple_strtoul(buf, NULL, 10);
acpi_status status = set_u32(tmp, ACER_CAP_THREEG);
- if (ACPI_FAILURE(status))
- return -EINVAL;
+ pr_info("This threeg sysfs will be removed in 2012"
+ " - used by: %s\n", current->comm);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
return count;
}
static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
@@ -1425,6 +1455,8 @@ static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
char *buf)
{
+ pr_info("This interface sysfs will be removed in 2012"
+ " - used by: %s\n", current->comm);
switch (interface->type) {
case ACER_AMW0:
return sprintf(buf, "AMW0\n");
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index fca3489218b..760c6d7624f 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -182,6 +182,7 @@ static const struct bios_settings_t bios_tbl[] = {
{"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
/* Acer 531 */
{"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
/* Gateway */
@@ -703,15 +704,15 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Feuerer");
MODULE_DESCRIPTION("Aspire One temperature and fan driver");
MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
-MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1410*:");
-MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1810*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMU*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
module_init(acerhdf_init);
module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d65df92e2ac..fa6d7ec68b2 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -70,11 +70,10 @@ MODULE_LICENSE("GPL");
* WAPF defines the behavior of the Fn+Fx wlan key
* The significance of values is yet to be found, but
* most of the time:
- * 0x0 will do nothing
- * 0x1 will allow to control the device with Fn+Fx key.
- * 0x4 will send an ACPI event (0x88) while pressing the Fn+Fx key
- * 0x5 like 0x1 or 0x4
- * So, if something doesn't work as you want, just try other values =)
+ * Bit | Bluetooth | WLAN
+ * 0 | Hardware | Hardware
+ * 1 | Hardware | Software
+ * 4 | Software | Software
*/
static uint wapf = 1;
module_param(wapf, uint, 0444);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 0580d99b079..b0859d4183e 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -38,6 +38,24 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID);
+/*
+ * WAPF defines the behavior of the Fn+Fx wlan key
+ * The significance of values is yet to be found, but
+ * most of the time:
+ * Bit | Bluetooth | WLAN
+ * 0 | Hardware | Hardware
+ * 1 | Hardware | Software
+ * 4 | Software | Software
+ */
+static uint wapf;
+module_param(wapf, uint, 0444);
+MODULE_PARM_DESC(wapf, "WAPF value");
+
+static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+{
+ driver->wapf = wapf;
+}
+
static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
@@ -53,16 +71,16 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x51, { KEY_WWW } },
{ KE_KEY, 0x55, { KEY_CALC } },
{ KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */
- { KE_KEY, 0x5D, { KEY_WLAN } },
- { KE_KEY, 0x5E, { KEY_WLAN } },
- { KE_KEY, 0x5F, { KEY_WLAN } },
+ { KE_KEY, 0x5D, { KEY_WLAN } }, /* Wireless console Toggle */
+ { KE_KEY, 0x5E, { KEY_WLAN } }, /* Wireless console Enable */
+ { KE_KEY, 0x5F, { KEY_WLAN } }, /* Wireless console Disable */
{ KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
- { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } },
+ { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
{ KE_KEY, 0x82, { KEY_CAMERA } },
{ KE_KEY, 0x88, { KEY_RFKILL } },
{ KE_KEY, 0x8A, { KEY_PROG1 } },
@@ -81,6 +99,7 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
.keymap = asus_nb_wmi_keymap,
.input_name = "Asus WMI hotkeys",
.input_phys = ASUS_NB_WMI_FILE "/input0",
+ .quirks = asus_nb_wmi_quirks,
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 65b66aa44c7..95cba9ebf6c 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -44,6 +44,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
+#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -66,6 +67,8 @@ MODULE_LICENSE("GPL");
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
+#define NOTIFY_KBD_BRTUP 0xc4
+#define NOTIFY_KBD_BRTDWN 0xc5
/* WMI Methods */
#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
@@ -93,6 +96,7 @@ MODULE_LICENSE("GPL");
/* Wireless */
#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001
#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
+#define ASUS_WMI_DEVID_CWAP 0x00010003
#define ASUS_WMI_DEVID_WLAN 0x00010011
#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
#define ASUS_WMI_DEVID_GPS 0x00010015
@@ -102,6 +106,12 @@ MODULE_LICENSE("GPL");
/* Leds */
/* 0x000200XX and 0x000400XX */
+#define ASUS_WMI_DEVID_LED1 0x00020011
+#define ASUS_WMI_DEVID_LED2 0x00020012
+#define ASUS_WMI_DEVID_LED3 0x00020013
+#define ASUS_WMI_DEVID_LED4 0x00020014
+#define ASUS_WMI_DEVID_LED5 0x00020015
+#define ASUS_WMI_DEVID_LED6 0x00020016
/* Backlight and Brightness */
#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
@@ -174,13 +184,18 @@ struct asus_wmi {
struct led_classdev tpd_led;
int tpd_led_wk;
+ struct led_classdev kbd_led;
+ int kbd_led_wk;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
+ struct work_struct kbd_led_work;
struct asus_rfkill wlan;
struct asus_rfkill bluetooth;
struct asus_rfkill wimax;
struct asus_rfkill wwan3g;
+ struct asus_rfkill gps;
+ struct asus_rfkill uwb;
struct hotplug_slot *hotplug_slot;
struct mutex hotplug_lock;
@@ -205,6 +220,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
asus->inputdev->phys = asus->driver->input_phys;
asus->inputdev->id.bustype = BUS_HOST;
asus->inputdev->dev.parent = &asus->platform_device->dev;
+ set_bit(EV_REP, asus->inputdev->evbit);
err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
if (err)
@@ -359,30 +375,80 @@ static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
return read_tpd_led_state(asus);
}
-static int asus_wmi_led_init(struct asus_wmi *asus)
+static void kbd_led_update(struct work_struct *work)
{
- int rv;
+ int ctrl_param = 0;
+ struct asus_wmi *asus;
- if (read_tpd_led_state(asus) < 0)
- return 0;
+ asus = container_of(work, struct asus_wmi, kbd_led_work);
- asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
- if (!asus->led_workqueue)
- return -ENOMEM;
- INIT_WORK(&asus->tpd_led_work, tpd_led_update);
+ /*
+ * bits 0-2: level
+ * bit 7: light on/off
+ */
+ if (asus->kbd_led_wk > 0)
+ ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
- asus->tpd_led.name = "asus::touchpad";
- asus->tpd_led.brightness_set = tpd_led_set;
- asus->tpd_led.brightness_get = tpd_led_get;
- asus->tpd_led.max_brightness = 1;
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
+}
- rv = led_classdev_register(&asus->platform_device->dev, &asus->tpd_led);
- if (rv) {
- destroy_workqueue(asus->led_workqueue);
- return rv;
+static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
+{
+ int retval;
+
+ /*
+ * bits 0-2: level
+ * bit 7: light on/off
+ * bit 8-10: environment (0: dark, 1: normal, 2: light)
+ * bit 17: status unknown
+ */
+ retval = asus_wmi_get_devstate_bits(asus, ASUS_WMI_DEVID_KBD_BACKLIGHT,
+ 0xFFFF);
+
+ /* Unknown status is considered as off */
+ if (retval == 0x8000)
+ retval = 0;
+
+ if (retval >= 0) {
+ if (level)
+ *level = retval & 0x80 ? retval & 0x7F : 0;
+ if (env)
+ *env = (retval >> 8) & 0x7F;
+ retval = 0;
}
- return 0;
+ return retval;
+}
+
+static void kbd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, kbd_led);
+
+ if (value > asus->kbd_led.max_brightness)
+ value = asus->kbd_led.max_brightness;
+ else if (value < 0)
+ value = 0;
+
+ asus->kbd_led_wk = value;
+ queue_work(asus->led_workqueue, &asus->kbd_led_work);
+}
+
+static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ int retval, value;
+
+ asus = container_of(led_cdev, struct asus_wmi, kbd_led);
+
+ retval = kbd_led_read(asus, &value, NULL);
+
+ if (retval < 0)
+ return retval;
+
+ return value;
}
static void asus_wmi_led_exit(struct asus_wmi *asus)
@@ -393,6 +459,48 @@ static void asus_wmi_led_exit(struct asus_wmi *asus)
destroy_workqueue(asus->led_workqueue);
}
+static int asus_wmi_led_init(struct asus_wmi *asus)
+{
+ int rv = 0;
+
+ asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!asus->led_workqueue)
+ return -ENOMEM;
+
+ if (read_tpd_led_state(asus) >= 0) {
+ INIT_WORK(&asus->tpd_led_work, tpd_led_update);
+
+ asus->tpd_led.name = "asus::touchpad";
+ asus->tpd_led.brightness_set = tpd_led_set;
+ asus->tpd_led.brightness_get = tpd_led_get;
+ asus->tpd_led.max_brightness = 1;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->tpd_led);
+ if (rv)
+ goto error;
+ }
+
+ if (kbd_led_read(asus, NULL, NULL) >= 0) {
+ INIT_WORK(&asus->kbd_led_work, kbd_led_update);
+
+ asus->kbd_led.name = "asus::kbd_backlight";
+ asus->kbd_led.brightness_set = kbd_led_set;
+ asus->kbd_led.brightness_get = kbd_led_get;
+ asus->kbd_led.max_brightness = 3;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->kbd_led);
+ }
+
+error:
+ if (rv)
+ asus_wmi_led_exit(asus);
+
+ return rv;
+}
+
+
/*
* PCI hotplug (for wlan rfkill)
*/
@@ -729,6 +837,16 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
rfkill_destroy(asus->wwan3g.rfkill);
asus->wwan3g.rfkill = NULL;
}
+ if (asus->gps.rfkill) {
+ rfkill_unregister(asus->gps.rfkill);
+ rfkill_destroy(asus->gps.rfkill);
+ asus->gps.rfkill = NULL;
+ }
+ if (asus->uwb.rfkill) {
+ rfkill_unregister(asus->uwb.rfkill);
+ rfkill_destroy(asus->uwb.rfkill);
+ asus->uwb.rfkill = NULL;
+ }
}
static int asus_wmi_rfkill_init(struct asus_wmi *asus)
@@ -763,6 +881,18 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus)
if (result && result != -ENODEV)
goto exit;
+ result = asus_new_rfkill(asus, &asus->gps, "asus-gps",
+ RFKILL_TYPE_GPS, ASUS_WMI_DEVID_GPS);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = asus_new_rfkill(asus, &asus->uwb, "asus-uwb",
+ RFKILL_TYPE_UWB, ASUS_WMI_DEVID_UWB);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
if (!asus->driver->hotplug_wireless)
goto exit;
@@ -797,8 +927,8 @@ exit:
* Hwmon device
*/
static ssize_t asus_hwmon_pwm1(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
u32 value;
@@ -809,7 +939,7 @@ static ssize_t asus_hwmon_pwm1(struct device *dev,
if (err < 0)
return err;
- value |= 0xFF;
+ value &= 0xFF;
if (value == 1) /* Low Speed */
value = 85;
@@ -825,7 +955,26 @@ static ssize_t asus_hwmon_pwm1(struct device *dev,
return sprintf(buf, "%d\n", value);
}
+static ssize_t asus_hwmon_temp1(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_THERMAL_CTRL, &value);
+
+ if (err < 0)
+ return err;
+
+ value = KELVIN_TO_CELSIUS((value & 0xFFFF)) * 1000;
+
+ return sprintf(buf, "%d\n", value);
+}
+
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0);
static ssize_t
show_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -836,12 +985,13 @@ static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
NULL
};
static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
- struct attribute *attr, int idx)
+ struct attribute *attr, int idx)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct platform_device *pdev = to_platform_device(dev->parent);
@@ -852,6 +1002,8 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
if (attr == &sensor_dev_attr_pwm1.dev_attr.attr)
dev_id = ASUS_WMI_DEVID_FAN_CTRL;
+ else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr)
+ dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;
if (dev_id != -1) {
int err = asus_wmi_get_devstate(asus, dev_id, &value);
@@ -869,9 +1021,13 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
* - reserved bits are non-zero
* - sfun and presence bit are not set
*/
- if (value != ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
+ if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
|| (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)))
ok = false;
+ } else if (dev_id == ASUS_WMI_DEVID_THERMAL_CTRL) {
+ /* If value is zero, something is clearly wrong */
+ if (value == 0)
+ ok = false;
}
return ok ? attr->mode : 0;
@@ -904,6 +1060,7 @@ static int asus_wmi_hwmon_init(struct asus_wmi *asus)
pr_err("Could not register asus hwmon device\n");
return PTR_ERR(hwmon);
}
+ dev_set_drvdata(hwmon, asus);
asus->hwmon_device = hwmon;
result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group);
if (result)
@@ -1060,6 +1217,8 @@ static void asus_wmi_notify(u32 value, void *context)
acpi_status status;
int code;
int orig_code;
+ unsigned int key_value = 1;
+ bool autorelease = 1;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
@@ -1075,6 +1234,13 @@ static void asus_wmi_notify(u32 value, void *context)
code = obj->integer.value;
orig_code = code;
+ if (asus->driver->key_filter) {
+ asus->driver->key_filter(asus->driver, &code, &key_value,
+ &autorelease);
+ if (code == ASUS_WMI_KEY_IGNORE)
+ goto exit;
+ }
+
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
code = NOTIFY_BRNUP_MIN;
else if (code >= NOTIFY_BRNDOWN_MIN &&
@@ -1084,7 +1250,8 @@ static void asus_wmi_notify(u32 value, void *context)
if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
if (!acpi_video_backlight_support())
asus_wmi_backlight_notify(asus, orig_code);
- } else if (!sparse_keymap_report_event(asus->inputdev, code, 1, true))
+ } else if (!sparse_keymap_report_event(asus->inputdev, code,
+ key_value, autorelease))
pr_info("Unknown key %x pressed\n", code);
exit:
@@ -1164,14 +1331,18 @@ ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int value;
+ int value, rv;
if (!count || sscanf(buf, "%i", &value) != 1)
return -EINVAL;
if (value < 0 || value > 2)
return -EINVAL;
- return asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
+ rv = asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
+ if (rv < 0)
+ return rv;
+
+ return count;
}
static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);
@@ -1234,7 +1405,7 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
/* We don't know yet what to do with this version... */
if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) {
- pr_info("BIOS WMI version: %d.%d", rv >> 8, rv & 0xFF);
+ pr_info("BIOS WMI version: %d.%d", rv >> 16, rv & 0xFF);
asus->spec = rv;
}
@@ -1266,6 +1437,12 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
return -ENODEV;
}
+ /* CWAP allows defining the behavior of the Fn+F2 key;
+ * this method doesn't seem to be present on Eee PCs */
+ if (asus->driver->wapf >= 0)
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_CWAP,
+ asus->driver->wapf, NULL);
+
return asus_wmi_sysfs_init(asus->platform_device);
}
@@ -1568,6 +1745,14 @@ static int asus_hotk_restore(struct device *device)
bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G);
rfkill_set_sw_state(asus->wwan3g.rfkill, bl);
}
+ if (asus->gps.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPS);
+ rfkill_set_sw_state(asus->gps.rfkill, bl);
+ }
+ if (asus->uwb.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_UWB);
+ rfkill_set_sw_state(asus->uwb.rfkill, bl);
+ }
return 0;
}
@@ -1604,7 +1789,7 @@ static int asus_wmi_probe(struct platform_device *pdev)
static bool used;
-int asus_wmi_register_driver(struct asus_wmi_driver *driver)
+int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver)
{
struct platform_driver *platform_driver;
struct platform_device *platform_device;
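
The keyboard-backlight support added to asus-wmi above packs the LED state into one device-state word. Restating the layout from the driver's own comments (bits 0-2 level, bit 7 on/off, bits 8-10 ambient environment), the encode/decode steps reduce to something like the following; the helper names are illustrative and anything beyond those comments is an assumption:

/* Illustrative sketch: build the control word sent for a given level. */
static inline int example_kbd_led_ctrl(int level)
{
	return level > 0 ? 0x80 | (level & 0x7F) : 0;
}

/* Illustrative sketch: recover the level from a device-state read. */
static inline int example_kbd_led_level(int devstate)
{
	return (devstate & 0x80) ? devstate & 0x7F : 0;
}
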
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index c044522c876..8147c10161c 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -29,12 +29,15 @@
#include <linux/platform_device.h>
+#define ASUS_WMI_KEY_IGNORE (-1)
+
struct module;
struct key_entry;
struct asus_wmi;
struct asus_wmi_driver {
bool hotplug_wireless;
+ int wapf;
const char *name;
struct module *owner;
@@ -44,6 +47,10 @@ struct asus_wmi_driver {
const struct key_entry *keymap;
const char *input_name;
const char *input_phys;
+ /* Returns new code, value, and autorelease values in arguments.
+ * Return ASUS_WMI_KEY_IGNORE in code if event should be ignored. */
+ void (*key_filter) (struct asus_wmi_driver *driver, int *code,
+ unsigned int *value, bool *autorelease);
int (*probe) (struct platform_device *device);
void (*quirks) (struct asus_wmi_driver *driver);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index e39ab1d3ed8..f31fa4efa72 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -612,7 +612,6 @@ static int __init dell_init(void)
if (!bufferpage)
goto fail_buffer;
buffer = page_address(bufferpage);
- mutex_init(&buffer_mutex);
ret = dell_setup_rfkill();
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index ce790827e19..fa9a2171cc1 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -54,6 +54,8 @@ MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
*/
static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
+ { KE_IGNORE, 0x003a, { KEY_CAPSLOCK } },
+
{ KE_KEY, 0xe045, { KEY_PROG1 } },
{ KE_KEY, 0xe009, { KEY_EJECTCD } },
@@ -85,6 +87,11 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
{ KE_IGNORE, 0xe013, { KEY_RESERVED } },
{ KE_IGNORE, 0xe020, { KEY_MUTE } },
+
+ /* Shortcut and audio panel keys */
+ { KE_IGNORE, 0xe025, { KEY_RESERVED } },
+ { KE_IGNORE, 0xe026, { KEY_RESERVED } },
+
{ KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } },
{ KE_IGNORE, 0xe030, { KEY_VOLUMEUP } },
{ KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } },
@@ -92,6 +99,9 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
{ KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } },
{ KE_IGNORE, 0xe045, { KEY_NUMLOCK } },
{ KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } },
+ { KE_IGNORE, 0xe0f7, { KEY_MUTE } },
+ { KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } },
+ { KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } },
{ KE_END, 0 }
};
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 4aa867a9b88..9f6e64302b4 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -56,6 +56,11 @@ MODULE_PARM_DESC(hotplug_wireless,
"If your laptop needs that, please report to "
"acpi4asus-user@lists.sourceforge.net.");
+/* Values for T101MT "Home" key */
+#define HOME_PRESS 0xe4
+#define HOME_HOLD 0xea
+#define HOME_RELEASE 0xe5
+
static const struct key_entry eeepc_wmi_keymap[] = {
/* Sleep already handled via generic ACPI code */
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
@@ -71,6 +76,7 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
{ KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
+ { KE_KEY, HOME_PRESS, { KEY_CONFIG } }, /* Home/Express gate key */
{ KE_KEY, 0xe8, { KEY_SCREENLOCK } },
{ KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
{ KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
@@ -81,6 +87,25 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_END, 0},
};
+static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
+ unsigned int *value, bool *autorelease)
+{
+ switch (*code) {
+ case HOME_PRESS:
+ *value = 1;
+ *autorelease = 0;
+ break;
+ case HOME_HOLD:
+ *code = ASUS_WMI_KEY_IGNORE;
+ break;
+ case HOME_RELEASE:
+ *code = HOME_PRESS;
+ *value = 0;
+ *autorelease = 0;
+ break;
+ }
+}
+
static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
void *context, void **retval)
{
@@ -141,6 +166,7 @@ static void eeepc_dmi_check(struct asus_wmi_driver *driver)
static void eeepc_wmi_quirks(struct asus_wmi_driver *driver)
{
driver->hotplug_wireless = hotplug_wireless;
+ driver->wapf = -1;
eeepc_dmi_check(driver);
}
@@ -151,6 +177,7 @@ static struct asus_wmi_driver asus_wmi_driver = {
.keymap = eeepc_wmi_keymap,
.input_name = "Eee PC WMI hotkeys",
.input_phys = EEEPC_WMI_FILE "/input0",
+ .key_filter = eeepc_wmi_key_filter,
.probe = eeepc_wmi_probe,
.quirks = eeepc_wmi_quirks,
};
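
The key_filter hook declared in asus-wmi.h above lets a board driver rewrite or drop a hotkey event before it is reported, which is exactly what eeepc_wmi_key_filter() does for the T101MT Home key. As a minimal sketch of the calling side (hypothetical; the real dispatch lives in asus-wmi.c and is not part of this diff), a caller could look like:

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include "asus-wmi.h"	/* struct asus_wmi_driver, ASUS_WMI_KEY_IGNORE */

/* Hypothetical caller sketch, not the actual asus-wmi notify handler. */
static void example_report_hotkey(struct asus_wmi_driver *drv,
				  struct input_dev *input, int code)
{
	unsigned int value = 1;		/* default: key press */
	bool autorelease = true;	/* default: emit the release automatically */

	if (drv->key_filter)
		drv->key_filter(drv, &code, &value, &autorelease);

	if (code == ASUS_WMI_KEY_IGNORE)
		return;

	sparse_keymap_report_event(input, code, value, autorelease);
}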
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index bfdda33feb2..0c595410e78 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -32,13 +32,22 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
#define IDEAPAD_RFKILL_DEV_NUM (3)
+#define CFG_BT_BIT (16)
+#define CFG_3G_BIT (17)
+#define CFG_WIFI_BIT (18)
+#define CFG_CAMERA_BIT (19)
+
struct ideapad_private {
struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
struct platform_device *platform_device;
struct input_dev *inputdev;
+ struct backlight_device *blightdev;
+ unsigned long cfg;
};
static acpi_handle ideapad_handle;
@@ -155,7 +164,7 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
}
/*
- * camera power
+ * sysfs
*/
static ssize_t show_ideapad_cam(struct device *dev,
struct device_attribute *attr,
@@ -186,6 +195,44 @@ static ssize_t store_ideapad_cam(struct device *dev,
static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
+static ssize_t show_ideapad_cfg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%.8lX\n", priv->cfg);
+}
+
+static DEVICE_ATTR(cfg, 0444, show_ideapad_cfg, NULL);
+
+static struct attribute *ideapad_attributes[] = {
+ &dev_attr_camera_power.attr,
+ &dev_attr_cfg.attr,
+ NULL
+};
+
+static mode_t ideapad_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int idx)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool supported;
+
+ if (attr == &dev_attr_camera_power.attr)
+ supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
+ else
+ supported = true;
+
+ return supported ? attr->mode : 0;
+}
+
+static struct attribute_group ideapad_attribute_group = {
+ .is_visible = ideapad_is_visible,
+ .attrs = ideapad_attributes
+};
+
/*
* Rfkill
*/
@@ -197,9 +244,9 @@ struct ideapad_rfk_data {
};
const struct ideapad_rfk_data ideapad_rfk_data[] = {
- { "ideapad_wlan", 18, 0x15, RFKILL_TYPE_WLAN },
- { "ideapad_bluetooth", 16, 0x17, RFKILL_TYPE_BLUETOOTH },
- { "ideapad_3g", 17, 0x20, RFKILL_TYPE_WWAN },
+ { "ideapad_wlan", CFG_WIFI_BIT, 0x15, RFKILL_TYPE_WLAN },
+ { "ideapad_bluetooth", CFG_BT_BIT, 0x17, RFKILL_TYPE_BLUETOOTH },
+ { "ideapad_3g", CFG_3G_BIT, 0x20, RFKILL_TYPE_WWAN },
};
static int ideapad_rfk_set(void *data, bool blocked)
@@ -265,8 +312,7 @@ static int __devinit ideapad_register_rfkill(struct acpi_device *adevice,
return 0;
}
-static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice,
- int dev)
+static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
@@ -280,15 +326,6 @@ static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice,
/*
* Platform device
*/
-static struct attribute *ideapad_attributes[] = {
- &dev_attr_camera_power.attr,
- NULL
-};
-
-static struct attribute_group ideapad_attribute_group = {
- .attrs = ideapad_attributes
-};
-
static int __devinit ideapad_platform_init(struct ideapad_private *priv)
{
int result;
@@ -369,7 +406,7 @@ err_free_dev:
return error;
}
-static void __devexit ideapad_input_exit(struct ideapad_private *priv)
+static void ideapad_input_exit(struct ideapad_private *priv)
{
sparse_keymap_free(priv->inputdev);
input_unregister_device(priv->inputdev);
@@ -383,6 +420,98 @@ static void ideapad_input_report(struct ideapad_private *priv,
}
/*
+ * backlight
+ */
+static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
+{
+ unsigned long now;
+
+ if (read_ec_data(ideapad_handle, 0x12, &now))
+ return -EIO;
+ return now;
+}
+
+static int ideapad_backlight_update_status(struct backlight_device *blightdev)
+{
+ if (write_ec_cmd(ideapad_handle, 0x13, blightdev->props.brightness))
+ return -EIO;
+ if (write_ec_cmd(ideapad_handle, 0x33,
+ blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
+ return -EIO;
+
+ return 0;
+}
+
+static const struct backlight_ops ideapad_backlight_ops = {
+ .get_brightness = ideapad_backlight_get_brightness,
+ .update_status = ideapad_backlight_update_status,
+};
+
+static int ideapad_backlight_init(struct ideapad_private *priv)
+{
+ struct backlight_device *blightdev;
+ struct backlight_properties props;
+ unsigned long max, now, power;
+
+ if (read_ec_data(ideapad_handle, 0x11, &max))
+ return -EIO;
+ if (read_ec_data(ideapad_handle, 0x12, &now))
+ return -EIO;
+ if (read_ec_data(ideapad_handle, 0x18, &power))
+ return -EIO;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = max;
+ props.type = BACKLIGHT_PLATFORM;
+ blightdev = backlight_device_register("ideapad",
+ &priv->platform_device->dev,
+ priv,
+ &ideapad_backlight_ops,
+ &props);
+ if (IS_ERR(blightdev)) {
+ pr_err("Could not register backlight device\n");
+ return PTR_ERR(blightdev);
+ }
+
+ priv->blightdev = blightdev;
+ blightdev->props.brightness = now;
+ blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ backlight_update_status(blightdev);
+
+ return 0;
+}
+
+static void ideapad_backlight_exit(struct ideapad_private *priv)
+{
+ if (priv->blightdev)
+ backlight_device_unregister(priv->blightdev);
+ priv->blightdev = NULL;
+}
+
+static void ideapad_backlight_notify_power(struct ideapad_private *priv)
+{
+ unsigned long power;
+ struct backlight_device *blightdev = priv->blightdev;
+
+ if (read_ec_data(ideapad_handle, 0x18, &power))
+ return;
+ blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+}
+
+static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
+{
+ unsigned long now;
+
+ /* brightness is handled by the acpi video driver; just read the EC value */
+ if (priv->blightdev == NULL) {
+ read_ec_data(ideapad_handle, 0x12, &now);
+ return;
+ }
+
+ backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY);
+}
+
+/*
* module init/exit
*/
static const struct acpi_device_id ideapad_device_ids[] = {
@@ -393,10 +522,11 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
{
- int ret, i, cfg;
+ int ret, i;
+ unsigned long cfg;
struct ideapad_private *priv;
- if (read_method_int(adevice->handle, "_CFG", &cfg))
+ if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
return -ENODEV;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -404,6 +534,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
return -ENOMEM;
dev_set_drvdata(&adevice->dev, priv);
ideapad_handle = adevice->handle;
+ priv->cfg = cfg;
ret = ideapad_platform_init(priv);
if (ret)
@@ -414,15 +545,25 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
goto input_failed;
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
- if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg))
+ if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
ideapad_register_rfkill(adevice, i);
else
priv->rfk[i] = NULL;
}
ideapad_sync_rfk_state(adevice);
+ if (!acpi_video_backlight_support()) {
+ ret = ideapad_backlight_init(priv);
+ if (ret && ret != -ENODEV)
+ goto backlight_failed;
+ }
+
return 0;
+backlight_failed:
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+ ideapad_unregister_rfkill(adevice, i);
+ ideapad_input_exit(priv);
input_failed:
ideapad_platform_exit(priv);
platform_failed:
@@ -435,6 +576,7 @@ static int __devexit ideapad_acpi_remove(struct acpi_device *adevice, int type)
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int i;
+ ideapad_backlight_exit(priv);
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
ideapad_unregister_rfkill(adevice, i);
ideapad_input_exit(priv);
@@ -459,12 +601,19 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
vpc1 = (vpc2 << 8) | vpc1;
for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
if (test_bit(vpc_bit, &vpc1)) {
- if (vpc_bit == 9)
+ switch (vpc_bit) {
+ case 9:
ideapad_sync_rfk_state(adevice);
- else if (vpc_bit == 4)
- read_ec_data(handle, 0x12, &vpc2);
- else
+ break;
+ case 4:
+ ideapad_backlight_notify_brightness(priv);
+ break;
+ case 2:
+ ideapad_backlight_notify_power(priv);
+ break;
+ default:
ideapad_input_report(priv, vpc_bit);
+ }
}
}
}
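
As a reading aid for the ideapad backlight code above, these are the EC registers it touches; the macro names are hypothetical (the driver uses the raw values directly):

/* Hypothetical names for the EC registers used by the ideapad backlight code. */
#define IDEAPAD_EC_BL_MAX	0x11	/* read: maximum brightness level */
#define IDEAPAD_EC_BL_LEVEL	0x12	/* read: current brightness level */
#define IDEAPAD_EC_BL_SET	0x13	/* write: set brightness level */
#define IDEAPAD_EC_BL_POWER	0x18	/* read: backlight power state */
#define IDEAPAD_EC_BL_ENABLE	0x33	/* write: 1 = backlight on, 0 = off */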
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 5ffe7c39814..809a3ae943c 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -403,7 +403,7 @@ static void ips_cpu_raise(struct ips_driver *ips)
thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);
- turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN;
+ turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~TURBO_TDP_MASK;
@@ -438,7 +438,7 @@ static void ips_cpu_lower(struct ips_driver *ips)
thm_writew(THM_MPCPC, (new_limit * 10) / 8);
- turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN;
+ turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~TURBO_TDP_MASK;
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 809adea4965..abddc83e9fd 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -477,6 +477,8 @@ static acpi_status intel_menlow_register_sensor(acpi_handle handle, u32 lvl,
return AE_ERROR;
}
+ return AE_OK;
+
aux1_not_found:
if (status == AE_NOT_FOUND)
return AE_OK;
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 3a578323122..ccd7b1f8351 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -493,20 +493,30 @@ static int mid_thermal_probe(struct platform_device *pdev)
/* Register each sensor with the generic thermal framework*/
for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
+ struct thermal_device_info *td_info = initialize_sensor(i);
+
+ if (!td_info) {
+ ret = -ENOMEM;
+ goto err;
+ }
pinfo->tzd[i] = thermal_zone_device_register(name[i],
- 0, initialize_sensor(i), &tzd_ops, 0, 0, 0, 0);
- if (IS_ERR(pinfo->tzd[i]))
- goto reg_fail;
+ 0, td_info, &tzd_ops, 0, 0, 0, 0);
+ if (IS_ERR(pinfo->tzd[i])) {
+ kfree(td_info);
+ ret = PTR_ERR(pinfo->tzd[i]);
+ goto err;
+ }
}
pinfo->pdev = pdev;
platform_set_drvdata(pdev, pinfo);
return 0;
-reg_fail:
- ret = PTR_ERR(pinfo->tzd[i]);
- while (--i >= 0)
+err:
+ while (--i >= 0) {
+ kfree(pinfo->tzd[i]->devdata);
thermal_zone_device_unregister(pinfo->tzd[i]);
+ }
configure_adc(0);
kfree(pinfo);
return ret;
@@ -524,8 +534,10 @@ static int mid_thermal_remove(struct platform_device *pdev)
int i;
struct platform_info *pinfo = platform_get_drvdata(pdev);
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
+ for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
+ kfree(pinfo->tzd[i]->devdata);
thermal_zone_device_unregister(pinfo->tzd[i]);
+ }
kfree(pinfo);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c
index bde47e9080c..c8a6aed4527 100644
--- a/drivers/platform/x86/intel_rar_register.c
+++ b/drivers/platform/x86/intel_rar_register.c
@@ -637,15 +637,13 @@ end_function:
return error;
}
-const struct pci_device_id rar_pci_id_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rar_pci_id_tbl) = {
{ PCI_VDEVICE(INTEL, 0x4110) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl);
-const struct pci_device_id *my_id_table = rar_pci_id_tbl;
-
/* field for registering driver to PCI device */
static struct pci_driver rar_pci_driver = {
.name = "rar_register_driver",
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 940accbe28d..c86665369a2 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -725,7 +725,7 @@ static void ipc_remove(struct pci_dev *pdev)
intel_scu_devices_destroy();
}
-static const struct pci_device_id pci_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
{ 0,}
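
Both the intel_rar_register and intel_scu_ipc hunks switch their ID tables to DEFINE_PCI_DEVICE_TABLE(). At the time, the macro was roughly a shorthand for the open-coded form below (worth double-checking against include/linux/pci.h of the tree in question), so the change mainly adds the __devinitconst annotation:

/* Approximate expansion of "static DEFINE_PCI_DEVICE_TABLE(pci_ids)" in this era: */
static const struct pci_device_id pci_ids[] __devinitconst = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
	{ 0, }
};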
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 3ff629df9f0..f204643c505 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -538,6 +538,15 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
},
.callback = dmi_check_cb
},
+ {
+ .ident = "MSI U270",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Micro-Star International Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"),
+ },
+ .callback = dmi_check_cb
+ },
{ }
};
@@ -996,3 +1005,4 @@ MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N034:*");
MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*");
MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*");
MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*");
+MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*");
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index c832e3356cd..6f40bf202dc 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -272,6 +272,7 @@ static int __init msi_wmi_init(void)
err_free_backlight:
backlight_device_unregister(backlight);
err_free_input:
+ sparse_keymap_free(msi_wmi_input_dev);
input_unregister_device(msi_wmi_input_dev);
err_uninstall_notifier:
wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index d347116d150..35916301104 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -521,6 +521,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
+ .ident = "N510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N510"),
+ DMI_MATCH(DMI_BOARD_NAME, "N510"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
.ident = "X125",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
@@ -601,6 +611,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
+ .ident = "N150/N210/N220",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
+ DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
.ident = "N150/N210/N220/N230",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
new file mode 100644
index 00000000000..1e54ae74274
--- /dev/null
+++ b/drivers/platform/x86/samsung-q10.c
@@ -0,0 +1,196 @@
+/*
+ * Driver for Samsung Q10 and related laptops: controls the backlight
+ *
+ * Copyright (c) 2011 Frederick van der Wyck <fvanderwyck@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/i8042.h>
+#include <linux/dmi.h>
+
+#define SAMSUNGQ10_BL_MAX_INTENSITY 255
+#define SAMSUNGQ10_BL_DEFAULT_INTENSITY 185
+
+#define SAMSUNGQ10_BL_8042_CMD 0xbe
+#define SAMSUNGQ10_BL_8042_DATA { 0x89, 0x91 }
+
+static int samsungq10_bl_brightness;
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force,
+ "Disable the DMI check and force the driver to be loaded");
+
+static int samsungq10_bl_set_intensity(struct backlight_device *bd)
+{
+
+ int brightness = bd->props.brightness;
+ unsigned char c[3] = SAMSUNGQ10_BL_8042_DATA;
+
+ c[2] = (unsigned char)brightness;
+ i8042_lock_chip();
+ i8042_command(c, (0x30 << 8) | SAMSUNGQ10_BL_8042_CMD);
+ i8042_unlock_chip();
+ samsungq10_bl_brightness = brightness;
+
+ return 0;
+}
+
+static int samsungq10_bl_get_intensity(struct backlight_device *bd)
+{
+ return samsungq10_bl_brightness;
+}
+
+static const struct backlight_ops samsungq10_bl_ops = {
+ .get_brightness = samsungq10_bl_get_intensity,
+ .update_status = samsungq10_bl_set_intensity,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int samsungq10_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int samsungq10_resume(struct device *dev)
+{
+
+ struct backlight_device *bd = dev_get_drvdata(dev);
+
+ samsungq10_bl_set_intensity(bd);
+ return 0;
+}
+#else
+#define samsungq10_suspend NULL
+#define samsungq10_resume NULL
+#endif
+
+static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops,
+ samsungq10_suspend, samsungq10_resume);
+
+static int __devinit samsungq10_probe(struct platform_device *pdev)
+{
+
+ struct backlight_properties props;
+ struct backlight_device *bd;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = SAMSUNGQ10_BL_MAX_INTENSITY;
+ bd = backlight_device_register("samsung", &pdev->dev, NULL,
+ &samsungq10_bl_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ platform_set_drvdata(pdev, bd);
+
+ bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
+ samsungq10_bl_set_intensity(bd);
+
+ return 0;
+}
+
+static int __devexit samsungq10_remove(struct platform_device *pdev)
+{
+
+ struct backlight_device *bd = platform_get_drvdata(pdev);
+
+ bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
+ samsungq10_bl_set_intensity(bd);
+
+ backlight_device_unregister(bd);
+
+ return 0;
+}
+
+static struct platform_driver samsungq10_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .pm = &samsungq10_pm_ops,
+ },
+ .probe = samsungq10_probe,
+ .remove = __devexit_p(samsungq10_remove),
+};
+
+static struct platform_device *samsungq10_device;
+
+static int __init dmi_check_callback(const struct dmi_system_id *id)
+{
+ printk(KERN_INFO KBUILD_MODNAME ": found model '%s'\n", id->ident);
+ return 1;
+}
+
+static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
+ {
+ .ident = "Samsung Q10",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Samsung"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SQ10"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Samsung Q20",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SENS Q20"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Samsung Q25",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NQ25"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Dell Latitude X200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X200"),
+ },
+ .callback = dmi_check_callback,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(dmi, samsungq10_dmi_table);
+
+static int __init samsungq10_init(void)
+{
+ if (!force && !dmi_check_system(samsungq10_dmi_table))
+ return -ENODEV;
+
+ samsungq10_device = platform_create_bundle(&samsungq10_driver,
+ samsungq10_probe,
+ NULL, 0, NULL, 0);
+
+ if (IS_ERR(samsungq10_device))
+ return PTR_ERR(samsungq10_device);
+
+ return 0;
+}
+
+static void __exit samsungq10_exit(void)
+{
+ platform_device_unregister(samsungq10_device);
+ platform_driver_unregister(&samsungq10_driver);
+}
+
+module_init(samsungq10_init);
+module_exit(samsungq10_exit);
+
+MODULE_AUTHOR("Frederick van der Wyck <fvanderwyck@gmail.com>");
+MODULE_DESCRIPTION("Samsung Q10 Driver");
+MODULE_LICENSE("GPL");
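
One non-obvious detail in the new samsung-q10 driver is the i8042_command() call. Assuming the usual i8042 convention (bits 12-15 of the command encode the number of parameter bytes written, bits 8-11 the number read back, bits 0-7 the command byte itself; see include/linux/i8042.h), the brightness write works out as in this illustrative sketch:

#include <linux/i8042.h>

/* Illustrative only; mirrors samsungq10_bl_set_intensity() above. */
static void example_set_backlight(unsigned char brightness)
{
	unsigned char param[3] = { 0x89, 0x91, brightness };

	i8042_lock_chip();
	i8042_command(param, (0x30 << 8) | 0xbe);	/* write 3 bytes, read 0, cmd 0xbe */
	i8042_unlock_chip();
}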
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 26c5b117df2..7bd829f247e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3186,8 +3186,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
/* (assignments unknown, please report if found) */
+ KEY_UNKNOWN, KEY_UNKNOWN,
+
+ /*
+ * The mic mute button only sends 0x1a. It does not
+ * automatically mute the mic or change the mute light.
+ */
+ KEY_MICMUTE, /* 0x1a: Mic mute (since ?400 or so) */
+
+ /* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN,
},
};
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index e57b50b3856..57de051a74b 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -235,4 +235,18 @@ config CHARGER_GPIO
This driver can be built as a module. If so, the module will be
called gpio-charger.
+config CHARGER_MAX8997
+ tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
+ depends on MFD_MAX8997 && REGULATOR_MAX8997
+ help
+ Say Y to enable support for the battery charger control sysfs and
+ platform data of MAX8997/MAX8966 PMICs.
+
+config CHARGER_MAX8998
+ tristate "Maxim MAX8998/LP3974 PMIC battery charger driver"
+ depends on MFD_MAX8998 && REGULATOR_MAX8998
+ help
+ Say Y to enable support for the battery charger control sysfs and
+ platform data of MAX8998/LP3974 PMICs.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 009a90fa8ac..b4af13dd8b6 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -36,3 +36,5 @@ obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
+obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
+obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index dc628cb2e76..8a612dec913 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -14,11 +14,11 @@
#include <linux/apm-emulation.h>
-#define PSY_PROP(psy, prop, val) psy->get_property(psy, \
- POWER_SUPPLY_PROP_##prop, val)
+#define PSY_PROP(psy, prop, val) (psy->get_property(psy, \
+ POWER_SUPPLY_PROP_##prop, val))
-#define _MPSY_PROP(prop, val) main_battery->get_property(main_battery, \
- prop, val)
+#define _MPSY_PROP(prop, val) (main_battery->get_property(main_battery, \
+ prop, val))
#define MPSY_PROP(prop, val) _MPSY_PROP(POWER_SUPPLY_PROP_##prop, val)
diff --git a/drivers/power/bq20z75.c b/drivers/power/bq20z75.c
index 506585e31a5..9c5e5beda3a 100644
--- a/drivers/power/bq20z75.c
+++ b/drivers/power/bq20z75.c
@@ -152,6 +152,10 @@ struct bq20z75_info {
bool gpio_detect;
bool enable_detection;
int irq;
+ int last_state;
+ int poll_time;
+ struct delayed_work work;
+ int ignore_changes;
};
static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
@@ -279,6 +283,7 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
int reg_offset, enum power_supply_property psp,
union power_supply_propval *val)
{
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
s32 ret;
ret = bq20z75_read_word_data(client,
@@ -293,15 +298,24 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
if (ret >= bq20z75_data[reg_offset].min_value &&
ret <= bq20z75_data[reg_offset].max_value) {
val->intval = ret;
- if (psp == POWER_SUPPLY_PROP_STATUS) {
- if (ret & BATTERY_FULL_CHARGED)
- val->intval = POWER_SUPPLY_STATUS_FULL;
- else if (ret & BATTERY_FULL_DISCHARGED)
- val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
- else if (ret & BATTERY_DISCHARGING)
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- else
- val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ if (psp != POWER_SUPPLY_PROP_STATUS)
+ return 0;
+
+ if (ret & BATTERY_FULL_CHARGED)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (ret & BATTERY_FULL_DISCHARGED)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (ret & BATTERY_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+
+ if (bq20z75_device->poll_time == 0)
+ bq20z75_device->last_state = val->intval;
+ else if (bq20z75_device->last_state != val->intval) {
+ cancel_delayed_work_sync(&bq20z75_device->work);
+ power_supply_changed(&bq20z75_device->power_supply);
+ bq20z75_device->poll_time = 0;
}
} else {
if (psp == POWER_SUPPLY_PROP_STATUS)
@@ -545,6 +559,60 @@ static irqreturn_t bq20z75_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static void bq20z75_external_power_changed(struct power_supply *psy)
+{
+ struct bq20z75_info *bq20z75_device;
+
+ bq20z75_device = container_of(psy, struct bq20z75_info, power_supply);
+
+ if (bq20z75_device->ignore_changes > 0) {
+ bq20z75_device->ignore_changes--;
+ return;
+ }
+
+ /* cancel outstanding work */
+ cancel_delayed_work_sync(&bq20z75_device->work);
+
+ schedule_delayed_work(&bq20z75_device->work, HZ);
+ bq20z75_device->poll_time = bq20z75_device->pdata->poll_retry_count;
+}
+
+static void bq20z75_delayed_work(struct work_struct *work)
+{
+ struct bq20z75_info *bq20z75_device;
+ s32 ret;
+
+ bq20z75_device = container_of(work, struct bq20z75_info, work.work);
+
+ ret = bq20z75_read_word_data(bq20z75_device->client,
+ bq20z75_data[REG_STATUS].addr);
+ /* if the read failed, give up on this work */
+ if (ret < 0) {
+ bq20z75_device->poll_time = 0;
+ return;
+ }
+
+ if (ret & BATTERY_FULL_CHARGED)
+ ret = POWER_SUPPLY_STATUS_FULL;
+ else if (ret & BATTERY_FULL_DISCHARGED)
+ ret = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (ret & BATTERY_DISCHARGING)
+ ret = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ ret = POWER_SUPPLY_STATUS_CHARGING;
+
+ if (bq20z75_device->last_state != ret) {
+ bq20z75_device->poll_time = 0;
+ power_supply_changed(&bq20z75_device->power_supply);
+ return;
+ }
+ if (bq20z75_device->poll_time > 0) {
+ schedule_delayed_work(&bq20z75_device->work, HZ);
+ bq20z75_device->poll_time--;
+ return;
+ }
+}
+
static int __devinit bq20z75_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -566,6 +634,13 @@ static int __devinit bq20z75_probe(struct i2c_client *client,
bq20z75_device->power_supply.num_properties =
ARRAY_SIZE(bq20z75_properties);
bq20z75_device->power_supply.get_property = bq20z75_get_property;
+ /* ignore the first external power change notification; it is
+ * generated by the power_supply_register callback
+ */
+ bq20z75_device->ignore_changes = 1;
+ bq20z75_device->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
+ bq20z75_device->power_supply.external_power_changed =
+ bq20z75_external_power_changed;
if (pdata) {
bq20z75_device->gpio_detect =
@@ -625,6 +700,10 @@ skip_gpio:
dev_info(&client->dev,
"%s: battery gas gauge device registered\n", client->name);
+ INIT_DELAYED_WORK(&bq20z75_device->work, bq20z75_delayed_work);
+
+ bq20z75_device->enable_detection = true;
+
return 0;
exit_psupply:
@@ -648,6 +727,9 @@ static int __devexit bq20z75_remove(struct i2c_client *client)
gpio_free(bq20z75_device->pdata->battery_detect);
power_supply_unregister(&bq20z75_device->power_supply);
+
+ cancel_delayed_work_sync(&bq20z75_device->work);
+
kfree(bq20z75_device);
bq20z75_device = NULL;
@@ -661,6 +743,9 @@ static int bq20z75_suspend(struct i2c_client *client,
struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
s32 ret;
+ if (bq20z75_device->poll_time > 0)
+ cancel_delayed_work_sync(&bq20z75_device->work);
+
/* write to manufacturer access with sleep command */
ret = bq20z75_write_word_data(client,
bq20z75_data[REG_MANUFACTURER_DATA].addr,
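
The bq20z75 changes above follow the usual delayed-work pattern for polling the gas gauge after an external power change; a stripped-down sketch of that lifecycle (generic names, not the driver's exact code) is:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct poll_ctx {			/* hypothetical container */
	struct delayed_work work;
	int poll_time;
};

static void poll_fn(struct work_struct *work)
{
	struct poll_ctx *ctx = container_of(work, struct poll_ctx, work.work);

	if (--ctx->poll_time > 0)		/* re-arm until the retry budget runs out */
		schedule_delayed_work(&ctx->work, HZ);
}

static void poll_start(struct poll_ctx *ctx, int retries)
{
	cancel_delayed_work_sync(&ctx->work);	/* drop any poll already queued */
	ctx->poll_time = retries;
	schedule_delayed_work(&ctx->work, HZ);	/* first sample one second from now */
}

/* INIT_DELAYED_WORK(&ctx->work, poll_fn) must have run once before poll_start(). */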
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 718f2c53782..a64b8854cfd 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -127,7 +127,7 @@ static int __devinit gpio_charger_probe(struct platform_device *pdev)
ret = request_any_context_irq(irq, gpio_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
dev_name(&pdev->dev), charger);
- if (ret)
+ if (ret < 0)
dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret);
else
gpio_charger->irq = irq;
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index c5c8805156c..98bfab35b8e 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -29,74 +29,6 @@
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
-enum max17042_register {
- MAX17042_STATUS = 0x00,
- MAX17042_VALRT_Th = 0x01,
- MAX17042_TALRT_Th = 0x02,
- MAX17042_SALRT_Th = 0x03,
- MAX17042_AtRate = 0x04,
- MAX17042_RepCap = 0x05,
- MAX17042_RepSOC = 0x06,
- MAX17042_Age = 0x07,
- MAX17042_TEMP = 0x08,
- MAX17042_VCELL = 0x09,
- MAX17042_Current = 0x0A,
- MAX17042_AvgCurrent = 0x0B,
- MAX17042_Qresidual = 0x0C,
- MAX17042_SOC = 0x0D,
- MAX17042_AvSOC = 0x0E,
- MAX17042_RemCap = 0x0F,
- MAX17402_FullCAP = 0x10,
- MAX17042_TTE = 0x11,
- MAX17042_V_empty = 0x12,
-
- MAX17042_RSLOW = 0x14,
-
- MAX17042_AvgTA = 0x16,
- MAX17042_Cycles = 0x17,
- MAX17042_DesignCap = 0x18,
- MAX17042_AvgVCELL = 0x19,
- MAX17042_MinMaxTemp = 0x1A,
- MAX17042_MinMaxVolt = 0x1B,
- MAX17042_MinMaxCurr = 0x1C,
- MAX17042_CONFIG = 0x1D,
- MAX17042_ICHGTerm = 0x1E,
- MAX17042_AvCap = 0x1F,
- MAX17042_ManName = 0x20,
- MAX17042_DevName = 0x21,
- MAX17042_DevChem = 0x22,
-
- MAX17042_TempNom = 0x24,
- MAX17042_TempCold = 0x25,
- MAX17042_TempHot = 0x26,
- MAX17042_AIN = 0x27,
- MAX17042_LearnCFG = 0x28,
- MAX17042_SHFTCFG = 0x29,
- MAX17042_RelaxCFG = 0x2A,
- MAX17042_MiscCFG = 0x2B,
- MAX17042_TGAIN = 0x2C,
- MAx17042_TOFF = 0x2D,
- MAX17042_CGAIN = 0x2E,
- MAX17042_COFF = 0x2F,
-
- MAX17042_Q_empty = 0x33,
- MAX17042_T_empty = 0x34,
-
- MAX17042_RCOMP0 = 0x38,
- MAX17042_TempCo = 0x39,
- MAX17042_Rx = 0x3A,
- MAX17042_T_empty0 = 0x3B,
- MAX17042_TaskPeriod = 0x3C,
- MAX17042_FSTAT = 0x3D,
-
- MAX17042_SHDNTIMER = 0x3F,
-
- MAX17042_VFRemCap = 0x4A,
-
- MAX17042_QH = 0x4D,
- MAX17042_QL = 0x4E,
-};
-
struct max17042_chip {
struct i2c_client *client;
struct power_supply battery;
@@ -123,10 +55,27 @@ static int max17042_read_reg(struct i2c_client *client, u8 reg)
return ret;
}
+static void max17042_set_reg(struct i2c_client *client,
+ struct max17042_reg_data *data, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ max17042_write_reg(client, data[i].addr, data[i].data);
+}
+
static enum power_supply_property max17042_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
};
static int max17042_get_property(struct power_supply *psy,
@@ -137,6 +86,30 @@ static int max17042_get_property(struct power_supply *psy,
struct max17042_chip, battery);
switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_STATUS);
+ if (val->intval & MAX17042_STATUS_BattAbsent)
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_Cycles);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_MinMaxVolt);
+ val->intval >>= 8;
+ val->intval *= 20000; /* Units of LSB = 20mV */
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_V_empty);
+ val->intval >>= 7;
+ val->intval *= 10000; /* Units of LSB = 10mV */
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = max17042_read_reg(chip->client,
MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */
@@ -149,6 +122,57 @@ static int max17042_get_property(struct power_supply *psy,
val->intval = max17042_read_reg(chip->client,
MAX17042_SOC) / 256;
break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_RepSOC);
+ if ((val->intval / 256) >= MAX17042_BATTERY_FULL)
+ val->intval = 1;
+ else if (val->intval >= 0)
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_TEMP);
+ /* The value is signed. */
+ if (val->intval & 0x8000) {
+ val->intval = (0x7fff & ~val->intval) + 1;
+ val->intval *= -1;
+ }
+ /* The value is converted into deci-centigrade scale */
+ /* Units of LSB = 1 / 256 degree Celsius */
+ val->intval = val->intval * 10 / 256;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ if (chip->pdata->enable_current_sense) {
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_Current);
+ if (val->intval & 0x8000) {
+ /* Negative */
+ val->intval = ~val->intval & 0x7fff;
+ val->intval++;
+ val->intval *= -1;
+ }
+ val->intval >>= 4;
+ val->intval *= 1000000 * 25 / chip->pdata->r_sns;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ if (chip->pdata->enable_current_sense) {
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_AvgCurrent);
+ if (val->intval & 0x8000) {
+ /* Negative */
+ val->intval = ~val->intval & 0x7fff;
+ val->intval++;
+ val->intval *= -1;
+ }
+ val->intval *= 1562500 / chip->pdata->r_sns;
+ } else {
+ return -EINVAL;
+ }
+ break;
default:
return -EINVAL;
}
@@ -180,18 +204,30 @@ static int __devinit max17042_probe(struct i2c_client *client,
chip->battery.properties = max17042_battery_props;
chip->battery.num_properties = ARRAY_SIZE(max17042_battery_props);
+ /* When current is not measured,
+ * CURRENT_NOW and CURRENT_AVG properties should be invisible. */
+ if (!chip->pdata->enable_current_sense)
+ chip->battery.num_properties -= 2;
+
ret = power_supply_register(&client->dev, &chip->battery);
if (ret) {
dev_err(&client->dev, "failed: power supply register\n");
- i2c_set_clientdata(client, NULL);
kfree(chip);
return ret;
}
+ /* Initialize registers according to values from the platform data */
+ if (chip->pdata->init_data)
+ max17042_set_reg(client, chip->pdata->init_data,
+ chip->pdata->num_init_data);
+
if (!chip->pdata->enable_current_sense) {
max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
+ } else {
+ if (chip->pdata->r_sns == 0)
+ chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
}
return 0;
@@ -202,7 +238,6 @@ static int __devexit max17042_remove(struct i2c_client *client)
struct max17042_chip *chip = i2c_get_clientdata(client);
power_supply_unregister(&chip->battery);
- i2c_set_clientdata(client, NULL);
kfree(chip);
return 0;
}
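
The temperature and current paths in the max17042 hunk above open-code the two's-complement handling of the 16-bit register values. For readers checking the arithmetic, it is equivalent to a plain 16-bit sign extension, as this illustrative helper (not part of the driver) shows:

#include <linux/types.h>

/* Equivalent to: if (val & 0x8000) { val = (0x7fff & ~val) + 1; val = -val; } */
static inline int example_sign_extend16(int raw)
{
	return (s16)(raw & 0xffff);
}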
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index 33ff0e37809..a9b0209a2f5 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -28,7 +28,7 @@
#include <linux/power/max8903_charger.h>
struct max8903_data {
- struct max8903_pdata *pdata;
+ struct max8903_pdata pdata;
struct device *dev;
struct power_supply psy;
bool fault;
@@ -52,8 +52,8 @@ static int max8903_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
- if (data->pdata->chg) {
- if (gpio_get_value(data->pdata->chg) == 0)
+ if (data->pdata.chg) {
+ if (gpio_get_value(data->pdata.chg) == 0)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (data->usb_in || data->ta_in)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -80,7 +80,7 @@ static int max8903_get_property(struct power_supply *psy,
static irqreturn_t max8903_dcin(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
bool ta_in;
enum power_supply_type old_type;
@@ -121,7 +121,7 @@ static irqreturn_t max8903_dcin(int irq, void *_data)
static irqreturn_t max8903_usbin(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
bool usb_in;
enum power_supply_type old_type;
@@ -160,7 +160,7 @@ static irqreturn_t max8903_usbin(int irq, void *_data)
static irqreturn_t max8903_fault(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
bool fault;
fault = gpio_get_value(pdata->flt) ? false : true;
@@ -193,7 +193,7 @@ static __devinit int max8903_probe(struct platform_device *pdev)
dev_err(dev, "Cannot allocate memory.\n");
return -ENOMEM;
}
- data->pdata = pdata;
+ memcpy(&data->pdata, pdata, sizeof(struct max8903_pdata));
data->dev = dev;
platform_set_drvdata(pdev, data);
@@ -349,7 +349,7 @@ static __devexit int max8903_remove(struct platform_device *pdev)
struct max8903_data *data = platform_get_drvdata(pdev);
if (data) {
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
if (pdata->flt)
free_irq(gpio_to_irq(pdata->flt), data);
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
new file mode 100644
index 00000000000..7106b49b26e
--- /dev/null
+++ b/drivers/power/max8997_charger.c
@@ -0,0 +1,206 @@
+/*
+ * max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+struct charger_data {
+ struct device *dev;
+ struct max8997_dev *iodev;
+ struct power_supply battery;
+};
+
+static enum power_supply_property max8997_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS, /* "FULL" or "NOT FULL" only. */
+ POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */
+ POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */
+};
+
+/* Note that the charger control is done by a current regulator "CHARGER" */
+static int max8997_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct charger_data *charger = container_of(psy,
+ struct charger_data, battery);
+ struct i2c_client *i2c = charger->iodev->i2c;
+ int ret;
+ u8 reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ if ((reg & (1 << 0)) == 0x1)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ if ((reg & (1 << 2)) == 0x0)
+ val->intval = 1;
+
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ /* DCINOK */
+ if (reg & (1 << 1))
+ val->intval = 1;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static __devinit int max8997_battery_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct charger_data *charger;
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (pdata->eoc_mA) {
+ int val = (pdata->eoc_mA - 50) / 10;
+ if (val < 0)
+ val = 0;
+ if (val > 0xf)
+ val = 0xf;
+
+ ret = max8997_update_reg(iodev->i2c,
+ MAX8997_REG_MBCCTRL5, val, 0xf);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot use i2c bus.\n");
+ return ret;
+ }
+ }
+
+ switch (pdata->timeout) {
+ case 5:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x2 << 4, 0x7 << 4);
+ break;
+ case 6:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x3 << 4, 0x7 << 4);
+ break;
+ case 7:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x4 << 4, 0x7 << 4);
+ break;
+ case 0:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x7 << 4, 0x7 << 4);
+ break;
+ default:
+ dev_err(&pdev->dev, "incorrect timeout value (%d)\n",
+ pdata->timeout);
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot use i2c bus.\n");
+ return ret;
+ }
+
+ charger = kzalloc(sizeof(struct charger_data), GFP_KERNEL);
+ if (charger == NULL) {
+ dev_err(&pdev->dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, charger);
+
+ charger->battery.name = "max8997_pmic";
+ charger->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ charger->battery.get_property = max8997_battery_get_property;
+ charger->battery.properties = max8997_battery_props;
+ charger->battery.num_properties = ARRAY_SIZE(max8997_battery_props);
+
+ charger->dev = &pdev->dev;
+ charger->iodev = iodev;
+
+ ret = power_supply_register(&pdev->dev, &charger->battery);
+ if (ret) {
+ dev_err(&pdev->dev, "failed: power supply register\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ kfree(charger);
+ return ret;
+}
+
+static int __devexit max8997_battery_remove(struct platform_device *pdev)
+{
+ struct charger_data *charger = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&charger->battery);
+ kfree(charger);
+ return 0;
+}
+
+static const struct platform_device_id max8997_battery_id[] = {
+ { "max8997-battery", 0 },
+ { },
+};
+
+static struct platform_driver max8997_battery_driver = {
+ .driver = {
+ .name = "max8997-battery",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_battery_probe,
+ .remove = __devexit_p(max8997_battery_remove),
+ .id_table = max8997_battery_id,
+};
+
+static int __init max8997_battery_init(void)
+{
+ return platform_driver_register(&max8997_battery_driver);
+}
+subsys_initcall(max8997_battery_init);
+
+static void __exit max8997_battery_cleanup(void)
+{
+ platform_driver_unregister(&max8997_battery_driver);
+}
+module_exit(max8997_battery_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8997/8966 battery control driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
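
For the MBCCTRL5 setup in max8997_battery_probe() above, the end-of-charge current maps onto a 4-bit register field in 10mA steps starting at 50mA. A hypothetical helper mirroring that computation makes the clamping explicit:

#include <linux/types.h>

/* Hypothetical helper; mirrors the eoc_mA handling in the probe above. */
static u8 example_eoc_to_field(int eoc_mA)
{
	int val = (eoc_mA - 50) / 10;	/* 50mA -> 0, 200mA and above -> 15 */

	if (val < 0)
		val = 0;
	if (val > 0xf)
		val = 0xf;
	return val;
}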
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
new file mode 100644
index 00000000000..cc21fa2120b
--- /dev/null
+++ b/drivers/power/max8998_charger.c
@@ -0,0 +1,218 @@
+/*
+ * max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
+ *
+ * Copyright (C) 2009-2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/max8998.h>
+#include <linux/mfd/max8998-private.h>
+
+struct max8998_battery_data {
+ struct device *dev;
+ struct max8998_dev *iodev;
+ struct power_supply battery;
+};
+
+static enum power_supply_property max8998_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */
+ POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */
+};
+
+/* Note that the charger control is done by a current regulator "CHARGER" */
+static int max8998_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max8998_battery_data *max8998 = container_of(psy,
+ struct max8998_battery_data, battery);
+ struct i2c_client *i2c = max8998->iodev->i2c;
+ int ret;
+ u8 reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, &reg);
+ if (ret)
+ return ret;
+ if (reg & (1 << 4))
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, &reg);
+ if (ret)
+ return ret;
+ if (reg & (1 << 3))
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static __devinit int max8998_battery_probe(struct platform_device *pdev)
+{
+ struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8998_battery_data *max8998;
+ struct i2c_client *i2c;
+ int ret = 0;
+
+ if (!pdata) {
+ dev_err(pdev->dev.parent, "No platform init data supplied\n");
+ return -ENODEV;
+ }
+
+ max8998 = kzalloc(sizeof(struct max8998_battery_data), GFP_KERNEL);
+ if (!max8998)
+ return -ENOMEM;
+
+ max8998->dev = &pdev->dev;
+ max8998->iodev = iodev;
+ platform_set_drvdata(pdev, max8998);
+ i2c = max8998->iodev->i2c;
+
+ /* Setup "End of Charge" */
+ /* If the EOC value is 0, keep the value set by the
+ * bootloader or the hardware default */
+ if (pdata->eoc >= 10 && pdata->eoc <= 45) {
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1,
+ (pdata->eoc / 5 - 2) << 5, 0x7 << 5);
+ } else if (pdata->eoc == 0) {
+ dev_dbg(max8998->dev,
+ "EOC value not set: leave it unchanged.\n");
+ } else {
+ dev_err(max8998->dev, "Invalid EOC value\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Setup Charge Restart Level */
+ switch (pdata->restart) {
+ case 100:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x1 << 3, 0x3 << 3);
+ break;
+ case 150:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x0 << 3, 0x3 << 3);
+ break;
+ case 200:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x2 << 3, 0x3 << 3);
+ break;
+ case -1:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x3 << 3, 0x3 << 3);
+ break;
+ case 0:
+ dev_dbg(max8998->dev,
+ "Restart Level not set: leave it unchanged.\n");
+ break;
+ default:
+ dev_err(max8998->dev, "Invalid Restart Level\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Setup Charge Full Timeout */
+ switch (pdata->timeout) {
+ case 5:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x0 << 4, 0x3 << 4);
+ break;
+ case 6:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x1 << 4, 0x3 << 4);
+ break;
+ case 7:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x2 << 4, 0x3 << 4);
+ break;
+ case -1:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x3 << 4, 0x3 << 4);
+ break;
+ case 0:
+ dev_dbg(max8998->dev,
+ "Full Timeout not set: leave it unchanged.\n");
+ default:
+ dev_err(max8998->dev, "Invalid Full Timeout value\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ max8998->battery.name = "max8998_pmic";
+ max8998->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ max8998->battery.get_property = max8998_battery_get_property;
+ max8998->battery.properties = max8998_battery_props;
+ max8998->battery.num_properties = ARRAY_SIZE(max8998_battery_props);
+
+ ret = power_supply_register(max8998->dev, &max8998->battery);
+ if (ret) {
+ dev_err(max8998->dev, "failed: power supply register\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ kfree(max8998);
+ return ret;
+}
+
+static int __devexit max8998_battery_remove(struct platform_device *pdev)
+{
+ struct max8998_battery_data *max8998 = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&max8998->battery);
+ kfree(max8998);
+
+ return 0;
+}
+
+static const struct platform_device_id max8998_battery_id[] = {
+ { "max8998-battery", TYPE_MAX8998 },
+ { },
+};
+
+static struct platform_driver max8998_battery_driver = {
+ .driver = {
+ .name = "max8998-battery",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8998_battery_probe,
+ .remove = __devexit_p(max8998_battery_remove),
+ .id_table = max8998_battery_id,
+};
+
+static int __init max8998_battery_init(void)
+{
+ return platform_driver_register(&max8998_battery_driver);
+}
+module_init(max8998_battery_init);
+
+static void __exit max8998_battery_cleanup(void)
+{
+ platform_driver_unregister(&max8998_battery_driver);
+}
+module_exit(max8998_battery_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8998 battery control driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:max8998-battery");
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index d36c289aaef..a675e31b4f1 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -266,7 +266,7 @@ static irqreturn_t s3c_adc_bat_charged(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init s3c_adc_bat_probe(struct platform_device *pdev)
+static int __devinit s3c_adc_bat_probe(struct platform_device *pdev)
{
struct s3c_adc_client *client;
struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index 92c16e1677b..54b9198fa57 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -62,7 +62,7 @@
#define TWL4030_MSTATEC_COMPLETE4 0x0e
static bool allow_usb;
-module_param(allow_usb, bool, 1);
+module_param(allow_usb, bool, 0644);
MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current");
struct twl4030_bci {
@@ -425,7 +425,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
{
struct twl4030_bci *bci;
int ret;
- int reg;
+ u32 reg;
bci = kzalloc(sizeof(*bci), GFP_KERNEL);
if (bci == NULL)
@@ -486,7 +486,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
}
/* Enable interrupts now. */
- reg = ~(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
TWL4030_TBATOR1 | TWL4030_BATSTS);
ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
TWL4030_INTERRUPTS_BCIIMR1A);
@@ -495,7 +495,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
goto fail_unmask_interrupts;
}
- reg = ~(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
+ reg = ~(u32)(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
TWL4030_INTERRUPTS_BCIIMR2A);
if (ret < 0)
@@ -572,7 +572,7 @@ static void __exit twl4030_bci_exit(void)
}
module_exit(twl4030_bci_exit);
-MODULE_AUTHOR("Gražydas Ignotas");
+MODULE_AUTHOR("Gražvydas Ignotas");
MODULE_DESCRIPTION("TWL4030 Battery Charger Interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:twl4030_bci");
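
The twl4030 interrupt-mask change above makes reg a u32 and casts before the bitwise NOT, presumably so the intermediate value stays unsigned and the truncation to the 8-bit IMR register is well-defined and warning-free. Roughly, with generic bit names:

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative only: building an 8-bit interrupt mask from bit definitions. */
static u8 example_build_imr(void)
{
	u32 mask = ~(u32)(BIT(2) | BIT(4) | BIT(7));	/* interrupts to leave unmasked */

	return mask & 0xff;	/* 0x6b: only the low byte reaches the IMR register */
}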
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
index 0fd130d80f5..e648cbea1e6 100644
--- a/drivers/power/wm831x_backup.c
+++ b/drivers/power/wm831x_backup.c
@@ -22,6 +22,7 @@
struct wm831x_backup {
struct wm831x *wm831x;
struct power_supply backup;
+ char name[20];
};
static int wm831x_backup_read_voltage(struct wm831x *wm831x,
@@ -163,6 +164,7 @@ static enum power_supply_property wm831x_backup_props[] = {
static __devinit int wm831x_backup_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
struct wm831x_backup *devdata;
struct power_supply *backup;
int ret;
@@ -182,7 +184,14 @@ static __devinit int wm831x_backup_probe(struct platform_device *pdev)
*/
wm831x_config_backup(wm831x);
- backup->name = "wm831x-backup";
+ if (wm831x_pdata && wm831x_pdata->wm831x_num)
+ snprintf(devdata->name, sizeof(devdata->name),
+ "wm831x-backup.%d", wm831x_pdata->wm831x_num);
+ else
+ snprintf(devdata->name, sizeof(devdata->name),
+ "wm831x-backup");
+
+ backup->name = devdata->name;
backup->type = POWER_SUPPLY_TYPE_BATTERY;
backup->properties = wm831x_backup_props;
backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
@@ -203,6 +212,7 @@ static __devexit int wm831x_backup_remove(struct platform_device *pdev)
struct wm831x_backup *devdata = platform_get_drvdata(pdev);
power_supply_unregister(&devdata->backup);
kfree(devdata);
return 0;
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index ddf8cf5f320..6cc2ca6427f 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -24,6 +24,9 @@ struct wm831x_power {
struct power_supply wall;
struct power_supply usb;
struct power_supply battery;
+ char wall_name[20];
+ char usb_name[20];
+ char battery_name[20];
};
static int wm831x_power_check_online(struct wm831x *wm831x, int supply,
@@ -486,6 +489,7 @@ static irqreturn_t wm831x_pwr_src_irq(int irq, void *data)
static __devinit int wm831x_power_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
struct wm831x_power *power;
struct power_supply *usb;
struct power_supply *battery;
@@ -503,12 +507,28 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
battery = &power->battery;
wall = &power->wall;
+ if (wm831x_pdata && wm831x_pdata->wm831x_num) {
+ snprintf(power->wall_name, sizeof(power->wall_name),
+ "wm831x-wall.%d", wm831x_pdata->wm831x_num);
+ snprintf(power->battery_name, sizeof(power->battery_name),
+ "wm831x-battery.%d", wm831x_pdata->wm831x_num);
+ snprintf(power->usb_name, sizeof(power->usb_name),
+ "wm831x-usb.%d", wm831x_pdata->wm831x_num);
+ } else {
+ snprintf(power->wall_name, sizeof(power->wall_name),
+ "wm831x-wall");
+ snprintf(power->battery_name, sizeof(power->battery_name),
+ "wm831x-battery");
+ snprintf(power->usb_name, sizeof(power->usb_name),
+ "wm831x-usb");
+ }
+
/* We ignore configuration failures since we can still read back
* the status without enabling the charger.
*/
wm831x_config_battery(wm831x);
- wall->name = "wm831x-wall";
+ wall->name = power->wall_name;
wall->type = POWER_SUPPLY_TYPE_MAINS;
wall->properties = wm831x_wall_props;
wall->num_properties = ARRAY_SIZE(wm831x_wall_props);
@@ -517,7 +537,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret)
goto err_kmalloc;
- battery->name = "wm831x-battery";
+ battery->name = power->battery_name;
battery->properties = wm831x_bat_props;
battery->num_properties = ARRAY_SIZE(wm831x_bat_props);
battery->get_property = wm831x_bat_get_prop;
@@ -526,7 +546,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret)
goto err_wall;
- usb->name = "wm831x-usb",
+ usb->name = power->usb_name;
usb->type = POWER_SUPPLY_TYPE_USB;
usb->properties = wm831x_usb_props;
usb->num_properties = ARRAY_SIZE(wm831x_usb_props);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 118eb213eb3..c7fd2c0e3f2 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -249,6 +249,12 @@ config REGULATOR_TPS6507X
three step-down converters and two general-purpose LDO voltage regulators.
It supports TI's software based Class-2 SmartReflex implementation.
+config REGULATOR_TPS65912
+ tristate "TI TPS65912 Power regulator"
+ depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
+ help
+ This driver supports the TPS65912 voltage regulator chip.
+
config REGULATOR_88PM8607
bool "Marvell 88PM8607 Power regulators"
depends on MFD_88PM860X=y
@@ -304,5 +310,12 @@ config REGULATOR_TPS65910
help
This driver supports TPS65910 voltage regulator chips.
+config REGULATOR_AAT2870
+ tristate "AnalogicTech AAT2870 Regulators"
+ depends on MFD_AAT2870_CORE
+ help
+ If you have an AnalogicTech AAT2870, say Y to enable the
+ regulator driver.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 3932d2ec38f..040d5aa6353 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -38,10 +38,12 @@ obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
+obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
new file mode 100644
index 00000000000..cd4104542f0
--- /dev/null
+++ b/drivers/regulator/aat2870-regulator.c
@@ -0,0 +1,232 @@
+/*
+ * linux/drivers/regulator/aat2870-regulator.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/aat2870.h>
+
+struct aat2870_regulator {
+ struct platform_device *pdev;
+ struct regulator_desc desc;
+
+ const int *voltages; /* uV */
+
+ int min_uV;
+ int max_uV;
+
+ u8 enable_addr;
+ u8 enable_shift;
+ u8 enable_mask;
+
+ u8 voltage_addr;
+ u8 voltage_shift;
+ u8 voltage_mask;
+};
+
+static int aat2870_ldo_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+
+ return ri->voltages[selector];
+}
+
+static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->voltage_addr, ri->voltage_mask,
+ (selector << ri->voltage_shift) & ri->voltage_mask);
+}
+
+static int aat2870_ldo_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+ u8 val;
+ int ret;
+
+ ret = aat2870->read(aat2870, ri->voltage_addr, &val);
+ if (ret)
+ return ret;
+
+ return (val & ri->voltage_mask) >> ri->voltage_shift;
+}
+
+static int aat2870_ldo_enable(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask,
+ ri->enable_mask);
+}
+
+static int aat2870_ldo_disable(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask, 0);
+}
+
+static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+ u8 val;
+ int ret;
+
+ ret = aat2870->read(aat2870, ri->enable_addr, &val);
+ if (ret)
+ return ret;
+
+ return val & ri->enable_mask ? 1 : 0;
+}
+
+static struct regulator_ops aat2870_ldo_ops = {
+ .list_voltage = aat2870_ldo_list_voltage,
+ .set_voltage_sel = aat2870_ldo_set_voltage_sel,
+ .get_voltage_sel = aat2870_ldo_get_voltage_sel,
+ .enable = aat2870_ldo_enable,
+ .disable = aat2870_ldo_disable,
+ .is_enabled = aat2870_ldo_is_enabled,
+};
+
+static const int aat2870_ldo_voltages[] = {
+ 1200000, 1300000, 1500000, 1600000,
+ 1800000, 2000000, 2200000, 2500000,
+ 2600000, 2700000, 2800000, 2900000,
+ 3000000, 3100000, 3200000, 3300000,
+};
+
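+/* Template for one LDO descriptor; the enable and voltage register fields
+ * are filled in at probe time by aat2870_get_regulator().
+ */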
+#define AAT2870_LDO(ids) \
+ { \
+ .desc = { \
+ .name = #ids, \
+ .id = AAT2870_ID_##ids, \
+ .n_voltages = ARRAY_SIZE(aat2870_ldo_voltages), \
+ .ops = &aat2870_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ .voltages = aat2870_ldo_voltages, \
+ .min_uV = 1200000, \
+ .max_uV = 3300000, \
+ }
+
+static struct aat2870_regulator aat2870_regulators[] = {
+ AAT2870_LDO(LDOA),
+ AAT2870_LDO(LDOB),
+ AAT2870_LDO(LDOC),
+ AAT2870_LDO(LDOD),
+};
+
+static struct aat2870_regulator *aat2870_get_regulator(int id)
+{
+ struct aat2870_regulator *ri = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aat2870_regulators); i++) {
+ ri = &aat2870_regulators[i];
+ if (ri->desc.id == id)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(aat2870_regulators))
+ return NULL;
+
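+ /* LDOA/LDOB share the AAT2870_LDO_AB register and LDOC/LDOD share
+  * AAT2870_LDO_CD; LDOA and LDOC use the upper nibble, LDOB and LDOD
+  * the lower nibble.
+  */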
+ ri->enable_addr = AAT2870_LDO_EN;
+ ri->enable_shift = id - AAT2870_ID_LDOA;
+ ri->enable_mask = 0x1 << ri->enable_shift;
+
+ ri->voltage_addr = (id - AAT2870_ID_LDOA) / 2 ?
+ AAT2870_LDO_CD : AAT2870_LDO_AB;
+ ri->voltage_shift = (id - AAT2870_ID_LDOA) % 2 ? 0 : 4;
+ ri->voltage_mask = 0xF << ri->voltage_shift;
+
+ return ri;
+}
+
+static int aat2870_regulator_probe(struct platform_device *pdev)
+{
+ struct aat2870_regulator *ri;
+ struct regulator_dev *rdev;
+
+ ri = aat2870_get_regulator(pdev->id);
+ if (!ri) {
+ dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id);
+ return -EINVAL;
+ }
+ ri->pdev = pdev;
+
+ rdev = regulator_register(&ri->desc, &pdev->dev,
+ pdev->dev.platform_data, ri);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "Failed to register regulator %s\n",
+ ri->desc.name);
+ return PTR_ERR(rdev);
+ }
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static int __devexit aat2870_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static struct platform_driver aat2870_regulator_driver = {
+ .driver = {
+ .name = "aat2870-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = aat2870_regulator_probe,
+ .remove = __devexit_p(aat2870_regulator_remove),
+};
+
+static int __init aat2870_regulator_init(void)
+{
+ return platform_driver_register(&aat2870_regulator_driver);
+}
+subsys_initcall(aat2870_regulator_init);
+
+static void __exit aat2870_regulator_exit(void)
+{
+ platform_driver_unregister(&aat2870_regulator_driver);
+}
+module_exit(aat2870_regulator_exit);
+
+MODULE_DESCRIPTION("AnalogicTech AAT2870 Regulator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d3e38790906..d8e6a429e8b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -20,6 +20,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/async.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
@@ -33,6 +34,8 @@
#include "dummy.h"
+#define rdev_crit(rdev, fmt, ...) \
+ pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
#define rdev_err(rdev, fmt, ...) \
pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
#define rdev_warn(rdev, fmt, ...) \
@@ -78,11 +81,13 @@ struct regulator {
char *supply_name;
struct device_attribute dev_attr;
struct regulator_dev *rdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
};
static int _regulator_is_enabled(struct regulator_dev *rdev);
-static int _regulator_disable(struct regulator_dev *rdev,
- struct regulator_dev **supply_rdev_ptr);
+static int _regulator_disable(struct regulator_dev *rdev);
static int _regulator_get_voltage(struct regulator_dev *rdev);
static int _regulator_get_current_limit(struct regulator_dev *rdev);
static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
@@ -90,6 +95,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV);
+static struct regulator *create_regulator(struct regulator_dev *rdev,
+ struct device *dev,
+ const char *supply_name);
static const char *rdev_get_name(struct regulator_dev *rdev)
{
@@ -143,8 +151,11 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
if (*min_uV < rdev->constraints->min_uV)
*min_uV = rdev->constraints->min_uV;
- if (*min_uV > *max_uV)
+ if (*min_uV > *max_uV) {
+ rdev_err(rdev, "unsupportable voltage range: %d-%duV\n",
+ *min_uV, *max_uV);
return -EINVAL;
+ }
return 0;
}
@@ -197,8 +208,11 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
if (*min_uA < rdev->constraints->min_uA)
*min_uA = rdev->constraints->min_uA;
- if (*min_uA > *max_uA)
+ if (*min_uA > *max_uA) {
+ rdev_err(rdev, "unsupportable current range: %d-%duA\n",
+ *min_uA, *max_uA);
return -EINVAL;
+ }
return 0;
}
@@ -213,6 +227,7 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
case REGULATOR_MODE_STANDBY:
break;
default:
+ rdev_err(rdev, "invalid mode %x specified\n", *mode);
return -EINVAL;
}
@@ -779,7 +794,6 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
if (ret < 0) {
rdev_err(rdev, "failed to apply %duV constraint\n",
rdev->constraints->min_uV);
- rdev->constraints = NULL;
return ret;
}
}
@@ -882,7 +896,6 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = suspend_prepare(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
- rdev->constraints = NULL;
goto out;
}
}
@@ -909,13 +922,15 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = ops->enable(rdev);
if (ret < 0) {
rdev_err(rdev, "failed to enable\n");
- rdev->constraints = NULL;
goto out;
}
}
print_constraints(rdev);
+ return 0;
out:
+ kfree(rdev->constraints);
+ rdev->constraints = NULL;
return ret;
}
@@ -929,21 +944,20 @@ out:
* core if its child is enabled.
*/
static int set_supply(struct regulator_dev *rdev,
- struct regulator_dev *supply_rdev)
+ struct regulator_dev *supply_rdev)
{
int err;
- err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj,
- "supply");
- if (err) {
- rdev_err(rdev, "could not add device link %s err %d\n",
- supply_rdev->dev.kobj.name, err);
- goto out;
+ rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
+
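+ /* Treat the supply as an ordinary consumer of the parent regulator so
+  * that enable/disable reference counting is shared with other consumers.
+  */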
+ rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
+ if (IS_ERR(rdev->supply)) {
+ err = PTR_ERR(rdev->supply);
+ rdev->supply = NULL;
+ return err;
}
- rdev->supply = supply_rdev;
- list_add(&rdev->slist, &supply_rdev->supply_list);
-out:
- return err;
+
+ return 0;
}
/**
@@ -1032,7 +1046,7 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
}
}
-#define REG_STR_SIZE 32
+#define REG_STR_SIZE 64
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
@@ -1052,8 +1066,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
if (dev) {
/* create a 'requested_microamps_name' sysfs entry */
- size = scnprintf(buf, REG_STR_SIZE, "microamps_requested_%s",
- supply_name);
+ size = scnprintf(buf, REG_STR_SIZE,
+ "microamps_requested_%s-%s",
+ dev_name(dev), supply_name);
if (size >= REG_STR_SIZE)
goto overflow_err;
@@ -1088,7 +1103,28 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
dev->kobj.name, err);
goto link_name_err;
}
+ } else {
+ regulator->supply_name = kstrdup(supply_name, GFP_KERNEL);
+ if (regulator->supply_name == NULL)
+ goto attr_err;
+ }
+
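+ /* Expose the consumer's requested load and voltage window via debugfs */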
+#ifdef CONFIG_DEBUG_FS
+ regulator->debugfs = debugfs_create_dir(regulator->supply_name,
+ rdev->debugfs);
+ if (IS_ERR_OR_NULL(regulator->debugfs)) {
+ rdev_warn(rdev, "Failed to create debugfs directory\n");
+ regulator->debugfs = NULL;
+ } else {
+ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+ &regulator->uA_load);
+ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+ &regulator->min_uV);
+ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+ &regulator->max_uV);
}
+#endif
+
mutex_unlock(&rdev->mutex);
return regulator;
link_name_err:
@@ -1267,13 +1303,17 @@ void regulator_put(struct regulator *regulator)
mutex_lock(&regulator_list_mutex);
rdev = regulator->rdev;
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(regulator->debugfs);
+#endif
+
/* remove any sysfs entries */
if (regulator->dev) {
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
- kfree(regulator->supply_name);
device_remove_file(regulator->dev, &regulator->dev_attr);
kfree(regulator->dev_attr.attr.name);
}
+ kfree(regulator->supply_name);
list_del(&regulator->list);
kfree(regulator);
@@ -1301,19 +1341,6 @@ static int _regulator_enable(struct regulator_dev *rdev)
{
int ret, delay;
- if (rdev->use_count == 0) {
- /* do we need to enable the supply regulator first */
- if (rdev->supply) {
- mutex_lock(&rdev->supply->mutex);
- ret = _regulator_enable(rdev->supply);
- mutex_unlock(&rdev->supply->mutex);
- if (ret < 0) {
- rdev_err(rdev, "failed to enable: %d\n", ret);
- return ret;
- }
- }
- }
-
/* check voltage and requested load before enabling */
if (rdev->constraints &&
(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
@@ -1388,19 +1415,27 @@ int regulator_enable(struct regulator *regulator)
struct regulator_dev *rdev = regulator->rdev;
int ret = 0;
+ if (rdev->supply) {
+ ret = regulator_enable(rdev->supply);
+ if (ret != 0)
+ return ret;
+ }
+
mutex_lock(&rdev->mutex);
ret = _regulator_enable(rdev);
mutex_unlock(&rdev->mutex);
+
+ if (ret != 0 && rdev->supply)
+ regulator_disable(rdev->supply);
+
return ret;
}
EXPORT_SYMBOL_GPL(regulator_enable);
/* locks held by regulator_disable() */
-static int _regulator_disable(struct regulator_dev *rdev,
- struct regulator_dev **supply_rdev_ptr)
+static int _regulator_disable(struct regulator_dev *rdev)
{
int ret = 0;
- *supply_rdev_ptr = NULL;
if (WARN(rdev->use_count <= 0,
"unbalanced disables for %s\n", rdev_get_name(rdev)))
@@ -1427,9 +1462,6 @@ static int _regulator_disable(struct regulator_dev *rdev,
NULL);
}
- /* decrease our supplies ref count and disable if required */
- *supply_rdev_ptr = rdev->supply;
-
rdev->use_count = 0;
} else if (rdev->use_count > 1) {
@@ -1440,6 +1472,7 @@ static int _regulator_disable(struct regulator_dev *rdev,
rdev->use_count--;
}
+
return ret;
}
@@ -1458,29 +1491,21 @@ static int _regulator_disable(struct regulator_dev *rdev,
int regulator_disable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
- struct regulator_dev *supply_rdev = NULL;
int ret = 0;
mutex_lock(&rdev->mutex);
- ret = _regulator_disable(rdev, &supply_rdev);
+ ret = _regulator_disable(rdev);
mutex_unlock(&rdev->mutex);
- /* decrease our supplies ref count and disable if required */
- while (supply_rdev != NULL) {
- rdev = supply_rdev;
-
- mutex_lock(&rdev->mutex);
- _regulator_disable(rdev, &supply_rdev);
- mutex_unlock(&rdev->mutex);
- }
+ if (ret == 0 && rdev->supply)
+ regulator_disable(rdev->supply);
return ret;
}
EXPORT_SYMBOL_GPL(regulator_disable);
/* locks held by regulator_force_disable() */
-static int _regulator_force_disable(struct regulator_dev *rdev,
- struct regulator_dev **supply_rdev_ptr)
+static int _regulator_force_disable(struct regulator_dev *rdev)
{
int ret = 0;
@@ -1497,10 +1522,6 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
REGULATOR_EVENT_DISABLE, NULL);
}
- /* decrease our supplies ref count and disable if required */
- *supply_rdev_ptr = rdev->supply;
-
- rdev->use_count = 0;
return ret;
}
@@ -1516,16 +1537,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
int regulator_force_disable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
- struct regulator_dev *supply_rdev = NULL;
int ret;
mutex_lock(&rdev->mutex);
regulator->uA_load = 0;
- ret = _regulator_force_disable(rdev, &supply_rdev);
+ ret = _regulator_force_disable(regulator->rdev);
mutex_unlock(&rdev->mutex);
- if (supply_rdev)
- regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev)));
+ if (rdev->supply)
+ while (rdev->open_count--)
+ regulator_disable(rdev->supply);
return ret;
}
@@ -2136,7 +2157,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
/* get input voltage */
input_uV = 0;
if (rdev->supply)
- input_uV = _regulator_get_voltage(rdev->supply);
+ input_uV = regulator_get_voltage(rdev->supply);
if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0) {
@@ -2206,17 +2227,8 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier);
static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
- struct regulator_dev *_rdev;
-
/* call rdev chain first */
blocking_notifier_call_chain(&rdev->notifier, event, NULL);
-
- /* now notify regulator we supply */
- list_for_each_entry(_rdev, &rdev->supply_list, slist) {
- mutex_lock(&_rdev->mutex);
- _notifier_call_chain(_rdev, event, data);
- mutex_unlock(&_rdev->mutex);
- }
}
/**
@@ -2264,6 +2276,13 @@ err:
}
EXPORT_SYMBOL_GPL(regulator_bulk_get);
+static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
+{
+ struct regulator_bulk_data *bulk = data;
+
+ bulk->ret = regulator_enable(bulk->consumer);
+}
+
/**
* regulator_bulk_enable - enable multiple regulator consumers
*
@@ -2279,21 +2298,33 @@ EXPORT_SYMBOL_GPL(regulator_bulk_get);
int regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers)
{
+ LIST_HEAD(async_domain);
int i;
- int ret;
+ int ret = 0;
+
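+ /* Start all the enables in parallel on a private async domain, then
+  * wait for every one to complete before checking the results.
+  */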
+ for (i = 0; i < num_consumers; i++)
+ async_schedule_domain(regulator_bulk_enable_async,
+ &consumers[i], &async_domain);
+
+ async_synchronize_full_domain(&async_domain);
+ /* If any consumer failed we need to unwind any that succeeded */
for (i = 0; i < num_consumers; i++) {
- ret = regulator_enable(consumers[i].consumer);
- if (ret != 0)
+ if (consumers[i].ret != 0) {
+ ret = consumers[i].ret;
goto err;
+ }
}
return 0;
err:
- pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret);
- for (--i; i >= 0; --i)
- regulator_disable(consumers[i].consumer);
+ for (i = 0; i < num_consumers; i++)
+ if (consumers[i].ret == 0)
+ regulator_disable(consumers[i].consumer);
+ else
+ pr_err("Failed to enable %s: %d\n",
+ consumers[i].supply, consumers[i].ret);
return ret;
}
@@ -2589,9 +2620,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
rdev->owner = regulator_desc->owner;
rdev->desc = regulator_desc;
INIT_LIST_HEAD(&rdev->consumer_list);
- INIT_LIST_HEAD(&rdev->supply_list);
INIT_LIST_HEAD(&rdev->list);
- INIT_LIST_HEAD(&rdev->slist);
BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
/* perform any regulator specific init */
@@ -2672,6 +2701,7 @@ unset_supplies:
unset_regulator_supplies(rdev);
scrub:
+ kfree(rdev->constraints);
device_unregister(&rdev->dev);
/* device core frees rdev */
rdev = ERR_PTR(ret);
@@ -2703,7 +2733,7 @@ void regulator_unregister(struct regulator_dev *rdev)
unset_regulator_supplies(rdev);
list_del(&rdev->list);
if (rdev->supply)
- sysfs_remove_link(&rdev->dev.kobj, "supply");
+ regulator_put(rdev->supply);
device_unregister(&rdev->dev);
kfree(rdev->constraints);
mutex_unlock(&regulator_list_mutex);
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index c7410bde7b5..f6ef6694ab9 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -36,6 +36,29 @@ static struct regulator_desc dummy_desc = {
.ops = &dummy_ops,
};
+static int __devinit dummy_regulator_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
+ &dummy_initdata, NULL);
+ if (IS_ERR(dummy_regulator_rdev)) {
+ ret = PTR_ERR(dummy_regulator_rdev);
+ pr_err("Failed to register regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver dummy_regulator_driver = {
+ .probe = dummy_regulator_probe,
+ .driver = {
+ .name = "reg-dummy",
+ .owner = THIS_MODULE,
+ },
+};
+
static struct platform_device *dummy_pdev;
void __init regulator_dummy_init(void)
@@ -55,12 +78,9 @@ void __init regulator_dummy_init(void)
return;
}
- dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
- &dummy_initdata, NULL);
- if (IS_ERR(dummy_regulator_rdev)) {
- ret = PTR_ERR(dummy_regulator_rdev);
- pr_err("Failed to register regulator: %d\n", ret);
+ ret = platform_driver_register(&dummy_regulator_driver);
+ if (ret != 0) {
+ pr_err("Failed to register dummy regulator driver: %d\n", ret);
platform_device_unregister(dummy_pdev);
- return;
}
}
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 55dd4e6650d..66d2d60b436 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -49,7 +49,6 @@
#define TPS65911_REG_LDO7 11
#define TPS65911_REG_LDO8 12
-#define TPS65910_NUM_REGULATOR 13
#define TPS65910_SUPPLY_STATE_ENABLED 0x1
/* supported VIO voltages in millivolts */
@@ -264,11 +263,12 @@ static struct tps_info tps65911_regs[] = {
};
struct tps65910_reg {
- struct regulator_desc desc[TPS65910_NUM_REGULATOR];
+ struct regulator_desc *desc;
struct tps65910 *mfd;
- struct regulator_dev *rdev[TPS65910_NUM_REGULATOR];
- struct tps_info *info[TPS65910_NUM_REGULATOR];
+ struct regulator_dev **rdev;
+ struct tps_info **info;
struct mutex mutex;
+ int num_regulators;
int mode;
int (*get_ctrl_reg)(int);
};
@@ -759,8 +759,13 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
mult = (selector / VDD1_2_NUM_VOLTS) + 1;
volt = VDD1_2_MIN_VOLT +
(selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
+ break;
case TPS65911_REG_VDDCTRL:
volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
+ break;
+ default:
+ BUG();
+ return -EINVAL;
}
return volt * 100 * mult;
@@ -897,16 +902,42 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
switch(tps65910_chip_id(tps65910)) {
case TPS65910:
pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
+ pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
info = tps65910_regs;
+ break;
case TPS65911:
pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
+ pmic->num_regulators = ARRAY_SIZE(tps65911_regs);
info = tps65911_regs;
+ break;
default:
pr_err("Invalid tps chip version\n");
+ kfree(pmic);
return -ENODEV;
}
- for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) {
+ pmic->desc = kcalloc(pmic->num_regulators,
+ sizeof(struct regulator_desc), GFP_KERNEL);
+ if (!pmic->desc) {
+ err = -ENOMEM;
+ goto err_free_pmic;
+ }
+
+ pmic->info = kcalloc(pmic->num_regulators,
+ sizeof(struct tps_info *), GFP_KERNEL);
+ if (!pmic->info) {
+ err = -ENOMEM;
+ goto err_free_desc;
+ }
+
+ pmic->rdev = kcalloc(pmic->num_regulators,
+ sizeof(struct regulator_dev *), GFP_KERNEL);
+ if (!pmic->rdev) {
+ err = -ENOMEM;
+ goto err_free_info;
+ }
+
+ for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) {
/* Register the regulators */
pmic->info[i] = info;
@@ -938,7 +969,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
"failed to register %s regulator\n",
pdev->name);
err = PTR_ERR(rdev);
- goto err;
+ goto err_unregister_regulator;
}
/* Save regulator for cleanup */
@@ -946,23 +977,31 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
}
return 0;
-err:
+err_unregister_regulator:
while (--i >= 0)
regulator_unregister(pmic->rdev[i]);
-
+ kfree(pmic->rdev);
+err_free_info:
+ kfree(pmic->info);
+err_free_desc:
+ kfree(pmic->desc);
+err_free_pmic:
kfree(pmic);
return err;
}
static int __devexit tps65910_remove(struct platform_device *pdev)
{
- struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev);
+ struct tps65910_reg *pmic = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < TPS65910_NUM_REGULATOR; i++)
- regulator_unregister(tps65910_reg->rdev[i]);
+ for (i = 0; i < pmic->num_regulators; i++)
+ regulator_unregister(pmic->rdev[i]);
- kfree(tps65910_reg);
+ kfree(pmic->rdev);
+ kfree(pmic->info);
+ kfree(pmic->desc);
+ kfree(pmic);
return 0;
}
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
new file mode 100644
index 00000000000..3a9313e00fa
--- /dev/null
+++ b/drivers/regulator/tps65912-regulator.c
@@ -0,0 +1,800 @@
+/*
+ * tps65912.c -- TI tps65912
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65912.h>
+
+/* DCDC's */
+#define TPS65912_REG_DCDC1 0
+#define TPS65912_REG_DCDC2 1
+#define TPS65912_REG_DCDC3 2
+#define TPS65912_REG_DCDC4 3
+
+/* LDOs */
+#define TPS65912_REG_LDO1 4
+#define TPS65912_REG_LDO2 5
+#define TPS65912_REG_LDO3 6
+#define TPS65912_REG_LDO4 7
+#define TPS65912_REG_LDO5 8
+#define TPS65912_REG_LDO6 9
+#define TPS65912_REG_LDO7 10
+#define TPS65912_REG_LDO8 11
+#define TPS65912_REG_LDO9 12
+#define TPS65912_REG_LDO10 13
+
+#define TPS65912_MAX_REG_ID TPS65912_REG_LDO10
+
+/* Number of step-down converters available */
+#define TPS65912_NUM_DCDC 4
+
+/* Number of LDO voltage regulators available */
+#define TPS65912_NUM_LDO 10
+
+/* Number of total regulators available */
+#define TPS65912_NUM_REGULATOR (TPS65912_NUM_DCDC + TPS65912_NUM_LDO)
+
+#define TPS65912_REG_ENABLED 0x80
+#define OP_SELREG_MASK 0x40
+#define OP_SELREG_SHIFT 6
+
+struct tps_info {
+ const char *name;
+};
+
+static struct tps_info tps65912_regs[] = {
+ {
+ .name = "DCDC1",
+ },
+ {
+ .name = "DCDC2",
+ },
+ {
+ .name = "DCDC3",
+ },
+ {
+ .name = "DCDC4",
+ },
+ {
+ .name = "LDO1",
+ },
+ {
+ .name = "LDO2",
+ },
+ {
+ .name = "LDO3",
+ },
+ {
+ .name = "LDO4",
+ },
+ {
+ .name = "LDO5",
+ },
+ {
+ .name = "LDO6",
+ },
+ {
+ .name = "LDO7",
+ },
+ {
+ .name = "LDO8",
+ },
+ {
+ .name = "LDO9",
+ },
+ {
+ .name = "LDO10",
+ },
+};
+
+struct tps65912_reg {
+ struct regulator_desc desc[TPS65912_NUM_REGULATOR];
+ struct tps65912 *mfd;
+ struct regulator_dev *rdev[TPS65912_NUM_REGULATOR];
+ struct tps_info *info[TPS65912_NUM_REGULATOR];
+ /* for read/write access */
+ struct mutex io_lock;
+ int mode;
+ int (*get_ctrl_reg)(int);
+ int dcdc1_range;
+ int dcdc2_range;
+ int dcdc3_range;
+ int dcdc4_range;
+ int pwm_mode_reg;
+ int eco_reg;
+};
+
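+/* Read and cache the DCDC range field from the LIMIT register; the range
+ * selects which VSEL-to-microvolt conversion applies to that converter.
+ */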
+static int tps65912_get_range(struct tps65912_reg *pmic, int id)
+{
+ struct tps65912 *mfd = pmic->mfd;
+
+ if (id > TPS65912_REG_DCDC4)
+ return 0;
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ pmic->dcdc1_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC1_LIMIT);
+ if (pmic->dcdc1_range < 0)
+ return pmic->dcdc1_range;
+ pmic->dcdc1_range = (pmic->dcdc1_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc1_range;
+ case TPS65912_REG_DCDC2:
+ pmic->dcdc2_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC2_LIMIT);
+ if (pmic->dcdc2_range < 0)
+ return pmic->dcdc2_range;
+ pmic->dcdc2_range = (pmic->dcdc2_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc2_range;
+ case TPS65912_REG_DCDC3:
+ pmic->dcdc3_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC3_LIMIT);
+ if (pmic->dcdc3_range < 0)
+ return pmic->dcdc3_range;
+ pmic->dcdc3_range = (pmic->dcdc3_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc3_range;
+ case TPS65912_REG_DCDC4:
+ pmic->dcdc4_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC4_LIMIT);
+ if (pmic->dcdc4_range < 0)
+ return pmic->dcdc4_range;
+ pmic->dcdc4_range = (pmic->dcdc4_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc4_range;
+ default:
+ return 0;
+ }
+}
+
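+/* Convert a 6-bit VSEL code to microvolts; each DCDC range selected by the
+ * LIMIT register has its own base voltage and step size.
+ */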
+static unsigned long tps65912_vsel_to_uv_range0(u8 vsel)
+{
+ unsigned long uv;
+
+ uv = ((vsel * 12500) + 500000);
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_range1(u8 vsel)
+{
+ unsigned long uv;
+
+ uv = ((vsel * 12500) + 700000);
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_range2(u8 vsel)
+{
+ unsigned long uv;
+
+ uv = ((vsel * 25000) + 500000);
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_range3(u8 vsel)
+{
+ unsigned long uv;
+
+ if (vsel == 0x3f)
+ uv = 3800000;
+ else
+ uv = ((vsel * 50000) + 500000);
+
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_ldo(u8 vsel)
+{
+ unsigned long uv = 0;
+
+ if (vsel <= 32)
+ uv = ((vsel * 25000) + 800000);
+ else if (vsel > 32 && vsel <= 60)
+ uv = (((vsel - 32) * 50000) + 1600000);
+ else if (vsel > 60)
+ uv = (((vsel - 60) * 100000) + 3000000);
+
+ return uv;
+}
+
+static int tps65912_get_ctrl_register(int id)
+{
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ return TPS65912_DCDC1_AVS;
+ case TPS65912_REG_DCDC2:
+ return TPS65912_DCDC2_AVS;
+ case TPS65912_REG_DCDC3:
+ return TPS65912_DCDC3_AVS;
+ case TPS65912_REG_DCDC4:
+ return TPS65912_DCDC4_AVS;
+ case TPS65912_REG_LDO1:
+ return TPS65912_LDO1_AVS;
+ case TPS65912_REG_LDO2:
+ return TPS65912_LDO2_AVS;
+ case TPS65912_REG_LDO3:
+ return TPS65912_LDO3_AVS;
+ case TPS65912_REG_LDO4:
+ return TPS65912_LDO4_AVS;
+ case TPS65912_REG_LDO5:
+ return TPS65912_LDO5;
+ case TPS65912_REG_LDO6:
+ return TPS65912_LDO6;
+ case TPS65912_REG_LDO7:
+ return TPS65912_LDO7;
+ case TPS65912_REG_LDO8:
+ return TPS65912_LDO8;
+ case TPS65912_REG_LDO9:
+ return TPS65912_LDO9;
+ case TPS65912_REG_LDO10:
+ return TPS65912_LDO10;
+ default:
+ return -EINVAL;
+ }
+}
+
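+/* The SELREG bit of the OP register indicates whether the OP or the AVS
+ * register currently provides the voltage selection, so return whichever
+ * register is in use.
+ */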
+static int tps65912_get_dcdc_sel_register(struct tps65912_reg *pmic, int id)
+{
+ struct tps65912 *mfd = pmic->mfd;
+ int opvsel = 0, sr = 0;
+ u8 reg = 0;
+
+ if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_DCDC4)
+ return -EINVAL;
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC1_OP);
+ sr = ((opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT);
+ if (sr)
+ reg = TPS65912_DCDC1_AVS;
+ else
+ reg = TPS65912_DCDC1_OP;
+ break;
+ case TPS65912_REG_DCDC2:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC2_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_DCDC2_AVS;
+ else
+ reg = TPS65912_DCDC2_OP;
+ break;
+ case TPS65912_REG_DCDC3:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC3_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_DCDC3_AVS;
+ else
+ reg = TPS65912_DCDC3_OP;
+ break;
+ case TPS65912_REG_DCDC4:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC4_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_DCDC4_AVS;
+ else
+ reg = TPS65912_DCDC4_OP;
+ break;
+ }
+ return reg;
+}
+
+static int tps65912_get_ldo_sel_register(struct tps65912_reg *pmic, int id)
+{
+ struct tps65912 *mfd = pmic->mfd;
+ int opvsel = 0, sr = 0;
+ u8 reg = 0;
+
+ if (id < TPS65912_REG_LDO1 || id > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ switch (id) {
+ case TPS65912_REG_LDO1:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO1_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO1_AVS;
+ else
+ reg = TPS65912_LDO1_OP;
+ break;
+ case TPS65912_REG_LDO2:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO2_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO2_AVS;
+ else
+ reg = TPS65912_LDO2_OP;
+ break;
+ case TPS65912_REG_LDO3:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO3_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO3_AVS;
+ else
+ reg = TPS65912_LDO3_OP;
+ break;
+ case TPS65912_REG_LDO4:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO4_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO4_AVS;
+ else
+ reg = TPS65912_LDO4_OP;
+ break;
+ case TPS65912_REG_LDO5:
+ reg = TPS65912_LDO5;
+ break;
+ case TPS65912_REG_LDO6:
+ reg = TPS65912_LDO6;
+ break;
+ case TPS65912_REG_LDO7:
+ reg = TPS65912_LDO7;
+ break;
+ case TPS65912_REG_LDO8:
+ reg = TPS65912_LDO8;
+ break;
+ case TPS65912_REG_LDO9:
+ reg = TPS65912_LDO9;
+ break;
+ case TPS65912_REG_LDO10:
+ reg = TPS65912_LDO10;
+ break;
+ }
+
+ return reg;
+}
+
+static int tps65912_get_mode_registers(struct tps65912_reg *pmic, int id)
+{
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ pmic->pwm_mode_reg = TPS65912_DCDC1_CTRL;
+ pmic->eco_reg = TPS65912_DCDC1_AVS;
+ break;
+ case TPS65912_REG_DCDC2:
+ pmic->pwm_mode_reg = TPS65912_DCDC2_CTRL;
+ pmic->eco_reg = TPS65912_DCDC2_AVS;
+ break;
+ case TPS65912_REG_DCDC3:
+ pmic->pwm_mode_reg = TPS65912_DCDC3_CTRL;
+ pmic->eco_reg = TPS65912_DCDC3_AVS;
+ break;
+ case TPS65912_REG_DCDC4:
+ pmic->pwm_mode_reg = TPS65912_DCDC4_CTRL;
+ pmic->eco_reg = TPS65912_DCDC4_AVS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tps65912_reg_is_enabled(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int reg, value, id = rdev_get_id(dev);
+
+ if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ value = tps65912_reg_read(mfd, reg);
+ if (value < 0)
+ return value;
+
+ return value & TPS65912_REG_ENABLED;
+}
+
+static int tps65912_reg_enable(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev);
+ int reg;
+
+ if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ return tps65912_set_bits(mfd, reg, TPS65912_REG_ENABLED);
+}
+
+static int tps65912_reg_disable(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev), reg;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ return tps65912_clear_bits(mfd, reg, TPS65912_REG_ENABLED);
+}
+
+static int tps65912_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int pwm_mode, eco, id = rdev_get_id(dev);
+
+ tps65912_get_mode_registers(pmic, id);
+
+ pwm_mode = tps65912_reg_read(mfd, pmic->pwm_mode_reg);
+ eco = tps65912_reg_read(mfd, pmic->eco_reg);
+
+ pwm_mode &= DCDCCTRL_DCDC_MODE_MASK;
+ eco &= DCDC_AVS_ECO_MASK;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ /* Check whether the mode is already set */
+ if (pwm_mode && !eco)
+ break;
+ tps65912_set_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
+ tps65912_clear_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ case REGULATOR_MODE_IDLE:
+ if (!pwm_mode && !eco)
+ break;
+ tps65912_clear_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
+ tps65912_clear_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
+ break;
+ case REGULATOR_MODE_STANDBY:
+ if (!pwm_mode && eco)
+ break;
+ tps65912_clear_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
+ tps65912_set_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned int tps65912_get_mode(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int pwm_mode, eco, mode = 0, id = rdev_get_id(dev);
+
+ tps65912_get_mode_registers(pmic, id);
+
+ pwm_mode = tps65912_reg_read(mfd, pmic->pwm_mode_reg);
+ eco = tps65912_reg_read(mfd, pmic->eco_reg);
+
+ pwm_mode &= DCDCCTRL_DCDC_MODE_MASK;
+ eco &= DCDC_AVS_ECO_MASK;
+
+ if (pwm_mode && !eco)
+ mode = REGULATOR_MODE_FAST;
+ else if (!pwm_mode && !eco)
+ mode = REGULATOR_MODE_NORMAL;
+ else if (!pwm_mode && eco)
+ mode = REGULATOR_MODE_STANDBY;
+
+ return mode;
+}
+
+static int tps65912_get_voltage_dcdc(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev), voltage = 0, range;
+ int opvsel = 0, avsel = 0, sr, vsel;
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC1_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC1_AVS);
+ range = pmic->dcdc1_range;
+ break;
+ case TPS65912_REG_DCDC2:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC2_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC2_AVS);
+ range = pmic->dcdc2_range;
+ break;
+ case TPS65912_REG_DCDC3:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC3_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC3_AVS);
+ range = pmic->dcdc3_range;
+ break;
+ case TPS65912_REG_DCDC4:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC4_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC4_AVS);
+ range = pmic->dcdc4_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ vsel = avsel;
+ else
+ vsel = opvsel;
+ vsel &= 0x3F;
+
+ switch (range) {
+ case 0:
+ /* 0.5 - 1.2875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range0(vsel);
+ break;
+ case 1:
+ /* 0.7 - 1.4875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range1(vsel);
+ break;
+ case 2:
+ /* 0.5 - 2.075V in 25mV steps */
+ voltage = tps65912_vsel_to_uv_range2(vsel);
+ break;
+ case 3:
+ /* 0.5 - 3.8V in 50mV steps */
+ voltage = tps65912_vsel_to_uv_range3(vsel);
+ break;
+ }
+ return voltage;
+}
+
+static int tps65912_set_voltage_dcdc(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev);
+ int value;
+ u8 reg;
+
+ reg = tps65912_get_dcdc_sel_register(pmic, id);
+ value = tps65912_reg_read(mfd, reg);
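+ /* Preserve the control bits in [7:6] and update only the 6-bit VSEL */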
+ value &= 0xC0;
+ return tps65912_reg_write(mfd, reg, selector | value);
+}
+
+static int tps65912_get_voltage_ldo(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev);
+ int vsel = 0;
+ u8 reg;
+
+ reg = tps65912_get_ldo_sel_register(pmic, id);
+ vsel = tps65912_reg_read(mfd, reg);
+ vsel &= 0x3F;
+
+ return tps65912_vsel_to_uv_ldo(vsel);
+}
+
+static int tps65912_set_voltage_ldo(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev), reg, value;
+
+ reg = tps65912_get_ldo_sel_register(pmic, id);
+ value = tps65912_reg_read(mfd, reg);
+ value &= 0xC0;
+ return tps65912_reg_write(mfd, reg, selector | value);
+}
+
+static int tps65912_list_voltage_dcdc(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ int range, voltage = 0, id = rdev_get_id(dev);
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ range = pmic->dcdc1_range;
+ break;
+ case TPS65912_REG_DCDC2:
+ range = pmic->dcdc2_range;
+ break;
+ case TPS65912_REG_DCDC3:
+ range = pmic->dcdc3_range;
+ break;
+ case TPS65912_REG_DCDC4:
+ range = pmic->dcdc4_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (range) {
+ case 0:
+ /* 0.5 - 1.2875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range0(selector);
+ break;
+ case 1:
+ /* 0.7 - 1.4875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range1(selector);
+ break;
+ case 2:
+ /* 0.5 - 2.075V in 25mV steps */
+ voltage = tps65912_vsel_to_uv_range2(selector);
+ break;
+ case 3:
+ /* 0.5 - 3.8V in 50mV steps */
+ voltage = tps65912_vsel_to_uv_range3(selector);
+ break;
+ }
+ return voltage;
+}
+
+static int tps65912_list_voltage_ldo(struct regulator_dev *dev,
+ unsigned selector)
+{
+ int ldo = rdev_get_id(dev);
+
+ if (ldo < TPS65912_REG_LDO1 || ldo > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ return tps65912_vsel_to_uv_ldo(selector);
+}
+
+/* Operations permitted on DCDCx */
+static struct regulator_ops tps65912_ops_dcdc = {
+ .is_enabled = tps65912_reg_is_enabled,
+ .enable = tps65912_reg_enable,
+ .disable = tps65912_reg_disable,
+ .set_mode = tps65912_set_mode,
+ .get_mode = tps65912_get_mode,
+ .get_voltage = tps65912_get_voltage_dcdc,
+ .set_voltage_sel = tps65912_set_voltage_dcdc,
+ .list_voltage = tps65912_list_voltage_dcdc,
+};
+
+/* Operations permitted on LDOx */
+static struct regulator_ops tps65912_ops_ldo = {
+ .is_enabled = tps65912_reg_is_enabled,
+ .enable = tps65912_reg_enable,
+ .disable = tps65912_reg_disable,
+ .get_voltage = tps65912_get_voltage_ldo,
+ .set_voltage_sel = tps65912_set_voltage_ldo,
+ .list_voltage = tps65912_list_voltage_ldo,
+};
+
+static __devinit int tps65912_probe(struct platform_device *pdev)
+{
+ struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
+ struct tps_info *info;
+ struct regulator_init_data *reg_data;
+ struct regulator_dev *rdev;
+ struct tps65912_reg *pmic;
+ struct tps65912_board *pmic_plat_data;
+ int i, err;
+
+ pmic_plat_data = dev_get_platdata(tps65912->dev);
+ if (!pmic_plat_data)
+ return -EINVAL;
+
+ reg_data = pmic_plat_data->tps65912_pmic_init_data;
+
+ pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ mutex_init(&pmic->io_lock);
+ pmic->mfd = tps65912;
+ platform_set_drvdata(pdev, pmic);
+
+ pmic->get_ctrl_reg = &tps65912_get_ctrl_register;
+ info = tps65912_regs;
+
+ for (i = 0; i < TPS65912_NUM_REGULATOR; i++, info++, reg_data++) {
+ int range = 0;
+ /* Register the regulators */
+ pmic->info[i] = info;
+
+ pmic->desc[i].name = info->name;
+ pmic->desc[i].id = i;
+ pmic->desc[i].n_voltages = 64;
+ pmic->desc[i].ops = (i > TPS65912_REG_DCDC4 ?
+ &tps65912_ops_ldo : &tps65912_ops_dcdc);
+ pmic->desc[i].type = REGULATOR_VOLTAGE;
+ pmic->desc[i].owner = THIS_MODULE;
+ range = tps65912_get_range(pmic, i);
+ rdev = regulator_register(&pmic->desc[i],
+ tps65912->dev, reg_data, pmic);
+ if (IS_ERR(rdev)) {
+ dev_err(tps65912->dev,
+ "failed to register %s regulator\n",
+ pdev->name);
+ err = PTR_ERR(rdev);
+ goto err;
+ }
+
+ /* Save regulator for cleanup */
+ pmic->rdev[i] = rdev;
+ }
+ return 0;
+
+err:
+ while (--i >= 0)
+ regulator_unregister(pmic->rdev[i]);
+
+ kfree(pmic);
+ return err;
+}
+
+static int __devexit tps65912_remove(struct platform_device *pdev)
+{
+ struct tps65912_reg *tps65912_reg = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < TPS65912_NUM_REGULATOR; i++)
+ regulator_unregister(tps65912_reg->rdev[i]);
+
+ kfree(tps65912_reg);
+ return 0;
+}
+
+static struct platform_driver tps65912_driver = {
+ .driver = {
+ .name = "tps65912-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_probe,
+ .remove = __devexit_p(tps65912_remove),
+};
+
+/**
+ * tps65912_init
+ *
+ * Module init function
+ */
+static int __init tps65912_init(void)
+{
+ return platform_driver_register(&tps65912_driver);
+}
+subsys_initcall(tps65912_init);
+
+/**
+ * tps65912_cleanup
+ *
+ * Module exit function
+ */
+static void __exit tps65912_cleanup(void)
+{
+ platform_driver_unregister(&tps65912_driver);
+}
+module_exit(tps65912_cleanup);
+
+MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65912 voltage regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65912-pmic");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 87fe0f75a56..ee8747f4fa0 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -835,8 +835,8 @@ static struct regulator_ops twlsmps_ops = {
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf, TWL4030, twl4030fixed_ops)
-#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \
- TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
+ TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
0x0, TWL6030, twl6030fixed_ops)
#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
@@ -856,24 +856,22 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
- .id = num, \
.min_mV = min_mVolts, \
.max_mV = max_mVolts, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
- .n_voltages = (max_mVolts - min_mVolts)/100, \
+ .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
-#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
- .id = num, \
.min_mV = min_mVolts, \
.max_mV = max_mVolts, \
.desc = { \
@@ -903,9 +901,8 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
-#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \
+#define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) { \
.base = offset, \
- .id = num, \
.delay = turnon_delay, \
.desc = { \
.name = #label, \
@@ -916,9 +913,8 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
-#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \
+#define TWL6025_ADJUSTABLE_SMPS(label, offset) { \
.base = offset, \
- .id = num, \
.min_mV = 600, \
.max_mV = 2100, \
.desc = { \
@@ -961,32 +957,32 @@ static struct twlreg_info twl_regs[] = {
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
verified since the specification is not public */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7),
- TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0),
- TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0),
- TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0),
- TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0),
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0),
+ TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0),
/* 6025 are renamed compared to 6030 versions */
- TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1),
- TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2),
- TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3),
- TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4),
- TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5),
- TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7),
- TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16),
- TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17),
- TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18),
-
- TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1),
- TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2),
- TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3),
+ TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300),
+
+ TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34),
+ TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10),
+ TWL6025_ADJUSTABLE_SMPS(VIO, 0x16),
};
static u8 twl_get_smps_offset(void)
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index a0982e80985..bd3531d8b2a 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -267,23 +267,6 @@ static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev,
return vsel;
}
-static int wm831x_buckv_select_max_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- u16 vsel;
-
- if (max_uV < 600000 || max_uV > 1800000)
- return -EINVAL;
-
- vsel = ((max_uV - 600000) / 12500) + 8;
-
- if (wm831x_buckv_list_voltage(rdev, vsel) < min_uV ||
- wm831x_buckv_list_voltage(rdev, vsel) < max_uV)
- return -EINVAL;
-
- return vsel;
-}
-
static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
@@ -338,28 +321,23 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
if (ret < 0)
return ret;
- /* Set the high voltage as the DVS voltage. This is optimised
- * for CPUfreq usage, most processors will keep the maximum
- * voltage constant and lower the minimum with the frequency. */
- vsel = wm831x_buckv_select_max_voltage(rdev, min_uV, max_uV);
- if (vsel < 0) {
- /* This should never happen - at worst the same vsel
- * should be chosen */
- WARN_ON(vsel < 0);
- return 0;
+ /*
+ * If this VSEL is higher than the last one we've seen then
+ * remember it as the DVS VSEL. This is optimised for CPUfreq
+ * usage where we want to get to the highest voltage very
+ * quickly.
+ */
+ if (vsel > dcdc->dvs_vsel) {
+ ret = wm831x_set_bits(wm831x, dvs_reg,
+ WM831X_DC1_DVS_VSEL_MASK,
+ vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = vsel;
+ else
+ dev_warn(wm831x->dev,
+ "Failed to set DCDC DVS VSEL: %d\n", ret);
}
- /* Don't bother if it's the same VSEL we're already using */
- if (vsel == dcdc->on_vsel)
- return 0;
-
- ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, vsel);
- if (ret == 0)
- dcdc->dvs_vsel = vsel;
- else
- dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n",
- ret);
-
return 0;
}
@@ -456,27 +434,6 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
if (!pdata || !pdata->dvs_gpio)
return;
- switch (pdata->dvs_control_src) {
- case 1:
- ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
- break;
- case 2:
- ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
- break;
- default:
- dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
- pdata->dvs_control_src, dcdc->name);
- return;
- }
-
- ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
- WM831X_DC1_DVS_SRC_MASK, ctrl);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
- dcdc->name, ret);
- return;
- }
-
ret = gpio_request(pdata->dvs_gpio, "DCDC DVS");
if (ret < 0) {
dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
@@ -498,17 +455,57 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
}
dcdc->dvs_gpio = pdata->dvs_gpio;
+
+ switch (pdata->dvs_control_src) {
+ case 1:
+ ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
+ break;
+ case 2:
+ ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
+ pdata->dvs_control_src, dcdc->name);
+ return;
+ }
+
+ /* If DVS_VSEL is set to the minimum value then raise it to ON_VSEL
+ * to make bootstrapping a bit smoother.
+ */
+ if (!dcdc->dvs_vsel) {
+ ret = wm831x_set_bits(wm831x,
+ dcdc->base + WM831X_DCDC_DVS_CONTROL,
+ WM831X_DC1_DVS_VSEL_MASK, dcdc->on_vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = dcdc->on_vsel;
+ else
+ dev_warn(wm831x->dev, "Failed to set DVS_VSEL: %d\n",
+ ret);
+ }
+
+ ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
+ WM831X_DC1_DVS_SRC_MASK, ctrl);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
+ dcdc->name, ret);
+ }
}
static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
+ int id;
struct wm831x_dcdc *dcdc;
struct resource *res;
int ret, irq;
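+ /* Regulators on a numbered WM831x instance are registered with platform
+  * device IDs offset by (wm831x_num * 10) + 1; subtract that base to get
+  * the zero-based DCDC index.
+  */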
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);
if (pdata == NULL || pdata->dcdc[id] == NULL)
@@ -545,7 +542,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
}
dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK;
- ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
+ ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL);
if (ret < 0) {
dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret);
goto err;
@@ -709,11 +706,17 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
+ int id;
struct wm831x_dcdc *dcdc;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);
if (pdata == NULL || pdata->dcdc[id] == NULL)
@@ -1046,3 +1049,4 @@ MODULE_DESCRIPTION("WM831x DC-DC convertor driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-buckv");
MODULE_ALIAS("platform:wm831x-buckp");
+MODULE_ALIAS("platform:wm831x-epe");
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 2220cf8defb..6709710a059 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -310,11 +310,17 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
if (pdata == NULL || pdata->ldo[id] == NULL)
@@ -574,11 +580,17 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
if (pdata == NULL || pdata->ldo[id] == NULL)
@@ -764,11 +776,18 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
if (pdata == NULL || pdata->ldo[id] == NULL)
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 35b2958d510..1a6a690f24d 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -43,7 +43,7 @@ static int wm8994_ldo_enable(struct regulator_dev *rdev)
if (!ldo->enable)
return 0;
- gpio_set_value(ldo->enable, 1);
+ gpio_set_value_cansleep(ldo->enable, 1);
ldo->is_enabled = true;
return 0;
@@ -57,7 +57,7 @@ static int wm8994_ldo_disable(struct regulator_dev *rdev)
if (!ldo->enable)
return -EINVAL;
- gpio_set_value(ldo->enable, 0);
+ gpio_set_value_cansleep(ldo->enable, 0);
ldo->is_enabled = false;
return 0;
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index bcae8dd4149..7789002bdd5 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -368,7 +368,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
pr_info("%s: already running\n", pdev->name);
/* force to 24 hour mode */
- new_ctrl = reg & ~(OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP);
+ new_ctrl = reg & (OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP);
new_ctrl |= OMAP_RTC_CTRL_STOP;
/* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 432444af7ee..a1d3ddba99c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
@@ -888,11 +889,11 @@ char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
char *buffer;
- buffer = kmalloc(user_len + 1, GFP_KERNEL);
+ buffer = vmalloc(user_len + 1);
if (buffer == NULL)
return ERR_PTR(-ENOMEM);
if (copy_from_user(buffer, user_buf, user_len) != 0) {
- kfree(buffer);
+ vfree(buffer);
return ERR_PTR(-EFAULT);
}
/* got the string, now strip linefeed. */
@@ -930,7 +931,7 @@ static ssize_t dasd_stats_write(struct file *file,
dasd_profile_off(prof);
} else
rc = -EINVAL;
- kfree(buffer);
+ vfree(buffer);
return rc;
}
@@ -1042,7 +1043,7 @@ static ssize_t dasd_stats_global_write(struct file *file,
dasd_global_profile_level = DASD_PROFILE_OFF;
} else
rc = -EINVAL;
- kfree(buffer);
+ vfree(buffer);
return rc;
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 30fb979d684..6e835c9fdfc 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1461,6 +1461,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
"Read device characteristic failed, rc=%d", rc);
goto out_err3;
}
+
+ if ((device->features & DASD_FEATURE_USERAW) &&
+ !(private->rdc_data.facilities.RT_in_LR)) {
+ dev_err(&device->cdev->dev, "The storage server does not "
+ "support raw-track access\n");
+ rc = -EINVAL;
+ goto out_err3;
+ }
+
/* find the valid cylinder size */
if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
private->rdc_data.long_no_cyl)
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 6c3c5364d08..e12989fff4f 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -312,14 +312,14 @@ static ssize_t dasd_stats_proc_write(struct file *file,
pr_info("The statistics have been reset\n");
} else
goto out_parse_error;
- kfree(buffer);
+ vfree(buffer);
return user_len;
out_parse_error:
rc = -EINVAL;
pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
str);
out_error:
- kfree(buffer);
+ vfree(buffer);
return rc;
#else
pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index 7ad30e72f86..5f9f929e891 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -82,12 +82,9 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write,
return -EFAULT;
} else {
len = *count;
- rc = copy_from_user(buf, buffer, sizeof(buf));
- if (rc != 0)
- return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- if (strict_strtoul(buf, 0, &val) != 0)
- return -EINVAL;
+ rc = kstrtoul_from_user(buffer, len, 0, &val);
+ if (rc)
+ return rc;
if (val != 0 && val != 1)
return -EINVAL;
callhome_enabled = val;
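
The same simplification — an on-stack copy_from_user() plus strict_strtoul() collapsed into a single kstrtoul_from_user() call — is applied to qdio_debug.c further down. A minimal sketch of the resulting pattern (the handler name and the 0/1 policy check are illustrative, not from the patch):

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	unsigned long val;
	int rc;

	/* Copies from user space, bounds-checks and converts in one
	 * call; returns 0 on success or a negative errno.
	 */
	rc = kstrtoul_from_user(ubuf, count, 0, &val);
	if (rc)
		return rc;
	if (val > 1)
		return -EINVAL;

	/* ... apply val ... */
	return count;
}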
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 7bc643f3f5a..e5c966462c5 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -14,6 +14,8 @@
#include "chsc.h"
#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
+#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */
+#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
/*
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f8b03a636e4..0e615cb912d 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -188,19 +188,13 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
struct qdio_irq *irq_ptr = seq->private;
struct qdio_q *q;
unsigned long val;
- char buf[8];
int ret, i;
if (!irq_ptr)
return 0;
- if (count >= sizeof(buf))
- return -EINVAL;
- if (copy_from_user(&buf, ubuf, count))
- return -EFAULT;
- buf[count] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+
+ ret = kstrtoul_from_user(ubuf, count, 10, &val);
+ if (ret)
return ret;
switch (val) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e58169c3247..288c9140290 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -313,7 +313,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
- int cc;
+ int retries = 0, cc;
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
@@ -325,6 +325,7 @@ again:
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
+ retries++;
if (!start_time) {
start_time = get_clock();
@@ -333,6 +334,11 @@ again:
if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
+ if (retries) {
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
+ "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
+ }
return cc;
}
@@ -728,13 +734,14 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
static int qdio_kick_outbound_q(struct qdio_q *q)
{
+ int retries = 0, cc;
unsigned int busy_bit;
- int cc;
if (!need_siga_out(q))
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
+retry:
qperf_inc(q, siga_write);
cc = qdio_siga_output(q, &busy_bit);
@@ -743,7 +750,11 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
break;
case 2:
if (busy_bit) {
- DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
+ while (++retries < QDIO_BUSY_BIT_RETRIES) {
+ mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
+ goto retry;
+ }
+ DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
cc |= QDIO_ERROR_SIGA_BUSY;
} else
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
@@ -753,6 +764,10 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
break;
}
+ if (retries) {
+ DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
+ DBF_ERROR("count:%u", retries);
+ }
return cc;
}
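
The control flow added above boils down to a bounded retry loop: keep reissuing the SIGA write while the busy bit is reported, sleep QDIO_BUSY_BIT_RETRY_DELAY milliseconds between attempts, give up after QDIO_BUSY_BIT_RETRIES tries (roughly 10 seconds), and log how many retries were needed. A compressed, illustrative sketch, not the driver code itself:

static int example_siga_write_with_retry(struct qdio_q *q)
{
	unsigned int busy_bit;
	int retries = 0, cc;

	do {
		cc = qdio_siga_output(q, &busy_bit);
		if (cc != 2 || !busy_bit)
			break;		/* done, or a non-busy error */
		mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
	} while (++retries < QDIO_BUSY_BIT_RETRIES);

	if (retries)
		DBF_ERROR("%4x retries:%d", SCH_NO(q), retries);

	return cc;
}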
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index d6702e57d42..dc8d022c07a 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -34,6 +34,9 @@ static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);
+/* clock disable operations are not passed on to hardware during boot */
+static int allow_disable;
+
void clk_rate_table_build(struct clk *clk,
struct cpufreq_frequency_table *freq_table,
int nr_freqs,
@@ -228,7 +231,7 @@ static void __clk_disable(struct clk *clk)
return;
if (!(--clk->usecount)) {
- if (likely(clk->ops && clk->ops->disable))
+ if (likely(allow_disable && clk->ops && clk->ops->disable))
clk->ops->disable(clk);
if (likely(clk->parent))
__clk_disable(clk->parent);
@@ -393,7 +396,7 @@ int clk_register(struct clk *clk)
{
int ret;
- if (clk == NULL || IS_ERR(clk))
+ if (IS_ERR_OR_NULL(clk))
return -EINVAL;
/*
@@ -744,3 +747,25 @@ err_out:
return err;
}
late_initcall(clk_debugfs_init);
+
+static int __init clk_late_init(void)
+{
+ unsigned long flags;
+ struct clk *clk;
+
+ /* disable all clocks with zero use count */
+ mutex_lock(&clock_list_sem);
+ spin_lock_irqsave(&clock_lock, flags);
+
+ list_for_each_entry(clk, &clock_list, node)
+ if (!clk->usecount && clk->ops && clk->ops->disable)
+ clk->ops->disable(clk);
+
+ /* from now on allow clock disable operations */
+ allow_disable = 1;
+
+ spin_unlock_irqrestore(&clock_lock, flags);
+ mutex_unlock(&clock_list_sem);
+ return 0;
+}
+late_initcall(clk_late_init);
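
The gating idea here is worth calling out: hardware clock disables are ignored until late_initcall() time, at which point every clock with a zero use count is switched off in one pass and the allow_disable gate is opened. A generic sketch of that pattern with illustrative names (the real code above iterates the sh clk framework's clock_list under its locks):

static int example_allow_disable;	/* stays 0 while the system boots */

static void example_clk_disable(struct clk *clk)
{
	/* Honour hardware disables only once boot has finished. */
	if (example_allow_disable && clk->ops && clk->ops->disable)
		clk->ops->disable(clk);
}

static int __init example_clk_late_init(void)
{
	example_allow_disable = 1;
	return 0;
}
late_initcall(example_clk_late_init);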
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index eba88c749fb..730b4a37b82 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2267,17 +2267,13 @@ static int __devexit
pl022_remove(struct amba_device *adev)
{
struct pl022 *pl022 = amba_get_drvdata(adev);
- int status = 0;
+
if (!pl022)
return 0;
/* Remove the queue */
- status = destroy_queue(pl022);
- if (status != 0) {
- dev_err(&adev->dev,
- "queue remove failed (%d)\n", status);
- return status;
- }
+ if (destroy_queue(pl022) != 0)
+ dev_err(&adev->dev, "queue remove failed\n");
load_ssp_default_config(pl022);
pl022_dma_remove(pl022);
free_irq(adev->irq[0], pl022);
@@ -2289,7 +2285,6 @@ pl022_remove(struct amba_device *adev)
spi_unregister_master(pl022->master);
spi_master_put(pl022->master);
amba_set_drvdata(adev, NULL);
- dev_dbg(&adev->dev, "remove succeeded\n");
return 0;
}
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 564ff4e0dbc..8345fb457a4 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -1,5 +1,6 @@
config ISCSI_TARGET
tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
+ depends on NET
select CRYPTO
select CRYPTO_CRC32C
select CRYPTO_CRC32C_INTEL if X86
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 14c81c4265b..c24fb10de60 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -120,7 +120,7 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
struct iscsi_tiqn *tiqn = NULL;
int ret;
- if (strlen(buf) > ISCSI_IQN_LEN) {
+ if (strlen(buf) >= ISCSI_IQN_LEN) {
pr_err("Target IQN exceeds %d bytes\n",
ISCSI_IQN_LEN);
return ERR_PTR(-EINVAL);
@@ -1857,7 +1857,7 @@ static int iscsit_handle_text_cmd(
char *text_ptr, *text_in;
int cmdsn_ret, niov = 0, rx_got, rx_size;
u32 checksum = 0, data_crc = 0, payload_length;
- u32 padding = 0, text_length = 0;
+ u32 padding = 0, pad_bytes = 0, text_length = 0;
struct iscsi_cmd *cmd;
struct kvec iov[3];
struct iscsi_text *hdr;
@@ -1896,7 +1896,7 @@ static int iscsit_handle_text_cmd(
padding = ((-payload_length) & 3);
if (padding != 0) {
- iov[niov].iov_base = cmd->pad_bytes;
+ iov[niov].iov_base = &pad_bytes;
iov[niov++].iov_len = padding;
rx_size += padding;
pr_debug("Receiving %u additional bytes"
@@ -1917,7 +1917,7 @@ static int iscsit_handle_text_cmd(
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
text_in, text_length,
- padding, cmd->pad_bytes,
+ padding, (u8 *)&pad_bytes,
(u8 *)&data_crc);
if (checksum != data_crc) {
@@ -3468,7 +3468,12 @@ static inline void iscsit_thread_check_cpumask(
}
#else
-#define iscsit_thread_get_cpumask(X) ({})
+
+void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+{
+ return;
+}
+
#define iscsit_thread_check_cpumask(X, Y, Z) ({})
#endif /* CONFIG_SMP */
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 32bb92c4445..f095e65b1cc 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -181,7 +181,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(
return ERR_PTR(-EOVERFLOW);
}
memset(buf, 0, MAX_PORTAL_LEN + 1);
- snprintf(buf, MAX_PORTAL_LEN, "%s", name);
+ snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 713a4d23557..4d087ac1106 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -978,7 +978,7 @@ struct iscsi_login *iscsi_target_init_negotiation(
pr_err("Unable to allocate memory for struct iscsi_login.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- goto out;
+ return NULL;
}
login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c75a01a1c47..89760329d5d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1747,6 +1747,8 @@ int transport_generic_handle_cdb(
}
EXPORT_SYMBOL(transport_generic_handle_cdb);
+static void transport_generic_request_failure(struct se_cmd *,
+ struct se_device *, int, int);
/*
* Used by fabric module frontends to queue tasks directly.
* May only be used from process context
@@ -1754,6 +1756,8 @@ EXPORT_SYMBOL(transport_generic_handle_cdb);
int transport_handle_cdb_direct(
struct se_cmd *cmd)
{
+ int ret;
+
if (!cmd->se_lun) {
dump_stack();
pr_err("cmd->se_lun is NULL\n");
@@ -1765,8 +1769,31 @@ int transport_handle_cdb_direct(
" from interrupt context\n");
return -EINVAL;
}
-
- return transport_generic_new_cmd(cmd);
+ /*
+ * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
+ * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
+ * in existing usage to ensure that outstanding descriptors are handled
+ * correctly during shutdown via transport_generic_wait_for_tasks()
+ *
+ * Also, we don't take cmd->t_state_lock here as we only expect
+ * this to be called for initial descriptor submission.
+ */
+ cmd->t_state = TRANSPORT_NEW_CMD;
+ atomic_set(&cmd->t_transport_active, 1);
+ /*
+ * transport_generic_new_cmd() is already handling QUEUE_FULL,
+ * so follow TRANSPORT_NEW_CMD processing thread context usage
+ * and call transport_generic_request_failure() if necessary.
+ */
+ ret = transport_generic_new_cmd(cmd);
+ if (ret == -EAGAIN)
+ return 0;
+ else if (ret < 0) {
+ cmd->transport_error_status = ret;
+ transport_generic_request_failure(cmd, NULL, 0,
+ (cmd->data_direction != DMA_TO_DEVICE));
+ }
+ return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -3324,7 +3351,7 @@ static int transport_generic_cmd_sequencer(
goto out_invalid_cdb_field;
}
- cmd->t_task_lba = get_unaligned_be16(&cdb[2]);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
/*
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index f7fff7ed63c..bd4fe21a23b 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -187,4 +187,9 @@ void ft_dump_cmd(struct ft_cmd *, const char *caller);
ssize_t ft_format_wwn(char *, size_t, u64);
+/*
+ * Underlying HW specific helper function
+ */
+void ft_invl_hw_context(struct ft_cmd *);
+
#endif /* __TCM_FC_H__ */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 09df38b4610..5654dc22f7a 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -320,6 +320,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
default:
pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
+ ft_invl_hw_context(cmd);
fc_frame_free(fp);
transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
break;
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 8e2a46ddccc..c37f4cd9645 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -213,62 +213,49 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
goto drop;
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ ep = fc_seq_exch(seq);
+ lport = ep->lp;
+ if (cmd->was_ddp_setup) {
+ BUG_ON(!ep);
+ BUG_ON(!lport);
+ }
+
/*
- * Doesn't expect even single byte of payload. Payload
+ * Doesn't expect payload if DDP is set up. Payload
* is expected to be copied directly to user buffers
- * due to DDP (Large Rx offload) feature, hence
- * BUG_ON if BUF is non-NULL
+ * due to DDP (Large Rx offload).
*/
buf = fc_frame_payload_get(fp, 1);
- if (cmd->was_ddp_setup && buf) {
- pr_debug("%s: When DDP was setup, not expected to"
- "receive frame with payload, Payload shall be"
- "copied directly to buffer instead of coming "
- "via. legacy receive queues\n", __func__);
- BUG_ON(buf);
- }
+ if (buf)
+ pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+ "cmd->sg_cnt 0x%x. DDP was setup"
+ " hence not expected to receive frame with "
+ "payload, Frame will be dropped if "
+ "'Sequence Initiative' bit in f_ctl is "
+ "not set\n", __func__, ep->xid, f_ctl,
+ cmd->sg, cmd->sg_cnt);
+ /*
+ * Invalidate the HW DDP context if it was set up for the respective
+ * command. Invalidation of the HW DDP context is required in both
+ * situations (success and error).
+ */
+ ft_invl_hw_context(cmd);
/*
- * If ft_cmd indicated 'ddp_setup', in that case only the last frame
- * should come with 'TSI bit being set'. If 'TSI bit is not set and if
- * data frame appears here, means error condition. In both the cases
- * release the DDP context (ddp_put) and in error case, as well
- * initiate error recovery mechanism.
+ * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
+ * write data frame is received successfully where payload is
+ * posted directly to user buffer and only the last frame's
+ * header is posted in receive queue.
+ *
+ * If "Sequence Initiative (TSI)" bit is not set, means error
+ * condition w.r.t. DDP, hence drop the packet and let explict
+ * ABORTS from other end of exchange timer trigger the recovery.
*/
- ep = fc_seq_exch(seq);
- if (cmd->was_ddp_setup) {
- BUG_ON(!ep);
- lport = ep->lp;
- BUG_ON(!lport);
- }
- if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
- f_ctl = ntoh24(fh->fh_f_ctl);
- /*
- * If TSI bit set in f_ctl, means last write data frame is
- * received successfully where payload is posted directly
- * to user buffer and only the last frame's header is posted
- * in legacy receive queue
- */
- if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
- cmd->write_data_len = lport->tt.ddp_done(lport,
- ep->xid);
- goto last_frame;
- } else {
- /*
- * Updating the write_data_len may be meaningless at
- * this point, but just in case if required in future
- * for debugging or any other purpose
- */
- pr_err("%s: Received frame with TSI bit not"
- " being SET, dropping the frame, "
- "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
- __func__, cmd->sg, cmd->sg_cnt);
- cmd->write_data_len = lport->tt.ddp_done(lport,
- ep->xid);
- lport->tt.seq_exch_abort(cmd->seq, 0);
- goto drop;
- }
- }
+ if (f_ctl & FC_FC_SEQ_INIT)
+ goto last_frame;
+ else
+ goto drop;
rel_off = ntohl(fh->fh_parm_offset);
frame_len = fr_len(fp);
@@ -331,3 +318,39 @@ last_frame:
drop:
fc_frame_free(fp);
}
+
+/*
+ * Handle and clean up any HW-specific resources on
+ * received ABORTS, errors, or timeouts.
+ */
+void ft_invl_hw_context(struct ft_cmd *cmd)
+{
+ struct fc_seq *seq = cmd->seq;
+ struct fc_exch *ep = NULL;
+ struct fc_lport *lport = NULL;
+
+ BUG_ON(!cmd);
+
+ /* Clean up the DDP context in HW if DDP was set up */
+ if (cmd->was_ddp_setup && seq) {
+ ep = fc_seq_exch(seq);
+ if (ep) {
+ lport = ep->lp;
+ if (lport && (ep->xid <= lport->lro_xid))
+ /*
+ * "ddp_done" trigger invalidation of HW
+ * specific DDP context
+ */
+ cmd->write_data_len = lport->tt.ddp_done(lport,
+ ep->xid);
+
+ /*
+ * Reset the flag to indicate that the HW DDP
+ * context has been invalidated and to avoid
+ * re-invalidating the same context (the
+ * context is identified using ep->xid)
+ */
+ cmd->was_ddp_setup = 0;
+ }
+ }
+}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index bf7c687519e..f7f71b2d310 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -14,11 +14,7 @@ menuconfig THERMAL
If you want this support, you should say Y or M here.
config THERMAL_HWMON
- bool "Hardware monitoring support"
+ bool
depends on THERMAL
depends on HWMON=y || HWMON=THERMAL
- help
- The generic thermal sysfs driver's hardware monitoring support
- requires a 2.10.7/3.0.2 or later lm-sensors userspace.
-
- Say Y if your user-space is new enough.
+ default y
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 0b1c82ad680..708f8e92771 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -420,6 +420,29 @@ thermal_cooling_device_trip_point_show(struct device *dev,
/* hwmon sys I/F */
#include <linux/hwmon.h>
+
+/* thermal zone devices with the same type share one hwmon device */
+struct thermal_hwmon_device {
+ char type[THERMAL_NAME_LENGTH];
+ struct device *device;
+ int count;
+ struct list_head tz_list;
+ struct list_head node;
+};
+
+struct thermal_hwmon_attr {
+ struct device_attribute attr;
+ char name[16];
+};
+
+/* one temperature input for each thermal zone */
+struct thermal_hwmon_temp {
+ struct list_head hwmon_node;
+ struct thermal_zone_device *tz;
+ struct thermal_hwmon_attr temp_input; /* hwmon sys attr */
+ struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */
+};
+
static LIST_HEAD(thermal_hwmon_list);
static ssize_t
@@ -437,9 +460,10 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
int ret;
struct thermal_hwmon_attr *hwmon_attr
= container_of(attr, struct thermal_hwmon_attr, attr);
- struct thermal_zone_device *tz
- = container_of(hwmon_attr, struct thermal_zone_device,
+ struct thermal_hwmon_temp *temp
+ = container_of(hwmon_attr, struct thermal_hwmon_temp,
temp_input);
+ struct thermal_zone_device *tz = temp->tz;
ret = tz->ops->get_temp(tz, &temperature);
@@ -455,9 +479,10 @@ temp_crit_show(struct device *dev, struct device_attribute *attr,
{
struct thermal_hwmon_attr *hwmon_attr
= container_of(attr, struct thermal_hwmon_attr, attr);
- struct thermal_zone_device *tz
- = container_of(hwmon_attr, struct thermal_zone_device,
+ struct thermal_hwmon_temp *temp
+ = container_of(hwmon_attr, struct thermal_hwmon_temp,
temp_crit);
+ struct thermal_zone_device *tz = temp->tz;
long temperature;
int ret;
@@ -469,22 +494,54 @@ temp_crit_show(struct device *dev, struct device_attribute *attr,
}
-static int
-thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+static struct thermal_hwmon_device *
+thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
{
struct thermal_hwmon_device *hwmon;
- int new_hwmon_device = 1;
- int result;
mutex_lock(&thermal_list_lock);
list_for_each_entry(hwmon, &thermal_hwmon_list, node)
if (!strcmp(hwmon->type, tz->type)) {
- new_hwmon_device = 0;
mutex_unlock(&thermal_list_lock);
- goto register_sys_interface;
+ return hwmon;
+ }
+ mutex_unlock(&thermal_list_lock);
+
+ return NULL;
+}
+
+/* Find the temperature input matching a given thermal zone */
+static struct thermal_hwmon_temp *
+thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
+ const struct thermal_zone_device *tz)
+{
+ struct thermal_hwmon_temp *temp;
+
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(temp, &hwmon->tz_list, hwmon_node)
+ if (temp->tz == tz) {
+ mutex_unlock(&thermal_list_lock);
+ return temp;
}
mutex_unlock(&thermal_list_lock);
+ return NULL;
+}
+
+static int
+thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ struct thermal_hwmon_device *hwmon;
+ struct thermal_hwmon_temp *temp;
+ int new_hwmon_device = 1;
+ int result;
+
+ hwmon = thermal_hwmon_lookup_by_type(tz);
+ if (hwmon) {
+ new_hwmon_device = 0;
+ goto register_sys_interface;
+ }
+
hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
@@ -502,30 +559,36 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
goto free_mem;
register_sys_interface:
- tz->hwmon = hwmon;
+ temp = kzalloc(sizeof(struct thermal_hwmon_temp), GFP_KERNEL);
+ if (!temp) {
+ result = -ENOMEM;
+ goto unregister_name;
+ }
+
+ temp->tz = tz;
hwmon->count++;
- snprintf(tz->temp_input.name, THERMAL_NAME_LENGTH,
+ snprintf(temp->temp_input.name, THERMAL_NAME_LENGTH,
"temp%d_input", hwmon->count);
- tz->temp_input.attr.attr.name = tz->temp_input.name;
- tz->temp_input.attr.attr.mode = 0444;
- tz->temp_input.attr.show = temp_input_show;
- sysfs_attr_init(&tz->temp_input.attr.attr);
- result = device_create_file(hwmon->device, &tz->temp_input.attr);
+ temp->temp_input.attr.attr.name = temp->temp_input.name;
+ temp->temp_input.attr.attr.mode = 0444;
+ temp->temp_input.attr.show = temp_input_show;
+ sysfs_attr_init(&temp->temp_input.attr.attr);
+ result = device_create_file(hwmon->device, &temp->temp_input.attr);
if (result)
- goto unregister_name;
+ goto free_temp_mem;
if (tz->ops->get_crit_temp) {
unsigned long temperature;
if (!tz->ops->get_crit_temp(tz, &temperature)) {
- snprintf(tz->temp_crit.name, THERMAL_NAME_LENGTH,
+ snprintf(temp->temp_crit.name, THERMAL_NAME_LENGTH,
"temp%d_crit", hwmon->count);
- tz->temp_crit.attr.attr.name = tz->temp_crit.name;
- tz->temp_crit.attr.attr.mode = 0444;
- tz->temp_crit.attr.show = temp_crit_show;
- sysfs_attr_init(&tz->temp_crit.attr.attr);
+ temp->temp_crit.attr.attr.name = temp->temp_crit.name;
+ temp->temp_crit.attr.attr.mode = 0444;
+ temp->temp_crit.attr.show = temp_crit_show;
+ sysfs_attr_init(&temp->temp_crit.attr.attr);
result = device_create_file(hwmon->device,
- &tz->temp_crit.attr);
+ &temp->temp_crit.attr);
if (result)
goto unregister_input;
}
@@ -534,13 +597,15 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
mutex_lock(&thermal_list_lock);
if (new_hwmon_device)
list_add_tail(&hwmon->node, &thermal_hwmon_list);
- list_add_tail(&tz->hwmon_node, &hwmon->tz_list);
+ list_add_tail(&temp->hwmon_node, &hwmon->tz_list);
mutex_unlock(&thermal_list_lock);
return 0;
unregister_input:
- device_remove_file(hwmon->device, &tz->temp_input.attr);
+ device_remove_file(hwmon->device, &temp->temp_input.attr);
+ free_temp_mem:
+ kfree(temp);
unregister_name:
if (new_hwmon_device) {
device_remove_file(hwmon->device, &dev_attr_name);
@@ -556,15 +621,30 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
static void
thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
- struct thermal_hwmon_device *hwmon = tz->hwmon;
+ struct thermal_hwmon_device *hwmon;
+ struct thermal_hwmon_temp *temp;
+
+ hwmon = thermal_hwmon_lookup_by_type(tz);
+ if (unlikely(!hwmon)) {
+ /* Should never happen... */
+ dev_dbg(&tz->device, "hwmon device lookup failed!\n");
+ return;
+ }
+
+ temp = thermal_hwmon_lookup_temp(hwmon, tz);
+ if (unlikely(!temp)) {
+ /* Should never happen... */
+ dev_dbg(&tz->device, "temperature input lookup failed!\n");
+ return;
+ }
- tz->hwmon = NULL;
- device_remove_file(hwmon->device, &tz->temp_input.attr);
+ device_remove_file(hwmon->device, &temp->temp_input.attr);
if (tz->ops->get_crit_temp)
- device_remove_file(hwmon->device, &tz->temp_crit.attr);
+ device_remove_file(hwmon->device, &temp->temp_crit.attr);
mutex_lock(&thermal_list_lock);
- list_del(&tz->hwmon_node);
+ list_del(&temp->hwmon_node);
+ kfree(temp);
if (!list_empty(&hwmon->tz_list)) {
mutex_unlock(&thermal_list_lock);
return;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index cb40b82daf3..4dcb37bbdf9 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -959,7 +959,7 @@ config SERIAL_IP22_ZILOG_CONSOLE
config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
- depends on HAVE_CLK && (SUPERH || H8300 || ARCH_SHMOBILE)
+ depends on HAVE_CLK && (SUPERH || ARCH_SHMOBILE)
select SERIAL_CORE
config SERIAL_SH_SCI_NR_UARTS
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 827db765459..7e91b3d368c 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1286,22 +1286,17 @@ static int serial_imx_resume(struct platform_device *dev)
static int serial_imx_probe_dt(struct imx_port *sport,
struct platform_device *pdev)
{
+ static int portnum = 0;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
of_match_device(imx_uart_dt_ids, &pdev->dev);
- int ret;
if (!np)
return -ENODEV;
- ret = of_alias_get_id(np, "serial");
- if (ret < 0) {
- pr_err("%s: failed to get alias id, errno %d\n",
- __func__, ret);
- return -ENODEV;
- } else {
- sport->port.line = ret;
- }
+ sport->port.line = portnum++;
+ if (sport->port.line >= UART_NR)
+ return -EINVAL;
if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
sport->have_rtscts = 1;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ebd8629c108..2ec57b2fb27 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -54,10 +54,6 @@
#include <asm/sh_bios.h>
#endif
-#ifdef CONFIG_H8300
-#include <asm/gpio.h>
-#endif
-
#include "sh-sci.h"
struct sci_port {
@@ -66,12 +62,6 @@ struct sci_port {
/* Platform configuration */
struct plat_sci_port *cfg;
- /* Port enable callback */
- void (*enable)(struct uart_port *port);
-
- /* Port disable callback */
- void (*disable)(struct uart_port *port);
-
/* Break timer */
struct timer_list break_timer;
int break_flag;
@@ -81,6 +71,8 @@ struct sci_port {
/* Function clock */
struct clk *fclk;
+ char *irqstr[SCIx_NR_IRQS];
+
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
@@ -121,6 +113,278 @@ to_sci_port(struct uart_port *uart)
return container_of(uart, struct sci_port, port);
}
+struct plat_sci_reg {
+ u8 offset, size;
+};
+
+/* Helper for invalidating specific entries of an inherited map. */
+#define sci_reg_invalid { .offset = 0, .size = 0 }
+
+static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
+ [SCIx_PROBE_REGTYPE] = {
+ [0 ... SCIx_NR_REGS - 1] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SCI definitions, dependent on the port's regshift
+ * value.
+ */
+ [SCIx_SCI_REGTYPE] = {
+ [SCSMR] = { 0x00, 8 },
+ [SCBRR] = { 0x01, 8 },
+ [SCSCR] = { 0x02, 8 },
+ [SCxTDR] = { 0x03, 8 },
+ [SCxSR] = { 0x04, 8 },
+ [SCxRDR] = { 0x05, 8 },
+ [SCFCR] = sci_reg_invalid,
+ [SCFDR] = sci_reg_invalid,
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common definitions for legacy IrDA ports, dependent on
+ * regshift value.
+ */
+ [SCIx_IRDA_REGTYPE] = {
+ [SCSMR] = { 0x00, 8 },
+ [SCBRR] = { 0x01, 8 },
+ [SCSCR] = { 0x02, 8 },
+ [SCxTDR] = { 0x03, 8 },
+ [SCxSR] = { 0x04, 8 },
+ [SCxRDR] = { 0x05, 8 },
+ [SCFCR] = { 0x06, 8 },
+ [SCFDR] = { 0x07, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SCIFA definitions.
+ */
+ [SCIx_SCIFA_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x20, 8 },
+ [SCxSR] = { 0x14, 16 },
+ [SCxRDR] = { 0x24, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SCIFB definitions.
+ */
+ [SCIx_SCIFB_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x40, 8 },
+ [SCxSR] = { 0x14, 16 },
+ [SCxRDR] = { 0x60, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SH-3 SCIF definitions.
+ */
+ [SCIx_SH3_SCIF_REGTYPE] = {
+ [SCSMR] = { 0x00, 8 },
+ [SCBRR] = { 0x02, 8 },
+ [SCSCR] = { 0x04, 8 },
+ [SCxTDR] = { 0x06, 8 },
+ [SCxSR] = { 0x08, 16 },
+ [SCxRDR] = { 0x0a, 8 },
+ [SCFCR] = { 0x0c, 8 },
+ [SCFDR] = { 0x0e, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SH-4(A) SCIF(B) definitions.
+ */
+ [SCIx_SH4_SCIF_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = { 0x20, 16 },
+ [SCLSR] = { 0x24, 16 },
+ },
+
+ /*
+ * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
+ * register.
+ */
+ [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = { 0x24, 16 },
+ },
+
+ /*
+ * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
+ * count registers.
+ */
+ [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */
+ [SCRFDR] = { 0x20, 16 },
+ [SCSPTR] = { 0x24, 16 },
+ [SCLSR] = { 0x28, 16 },
+ },
+
+ /*
+ * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
+ * registers.
+ */
+ [SCIx_SH7705_SCIF_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x20, 8 },
+ [SCxSR] = { 0x14, 16 },
+ [SCxRDR] = { 0x24, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+};
+
+#define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset)
+
+/*
+ * The "offset" here is rather misleading, in that it refers to an enum
+ * value relative to the port mapping rather than the fixed offset
+ * itself, which needs to be manually retrieved from the platform's
+ * register map for the given port.
+ */
+static unsigned int sci_serial_in(struct uart_port *p, int offset)
+{
+ struct plat_sci_reg *reg = sci_getreg(p, offset);
+
+ if (reg->size == 8)
+ return ioread8(p->membase + (reg->offset << p->regshift));
+ else if (reg->size == 16)
+ return ioread16(p->membase + (reg->offset << p->regshift));
+ else
+ WARN(1, "Invalid register access\n");
+
+ return 0;
+}
+
+static void sci_serial_out(struct uart_port *p, int offset, int value)
+{
+ struct plat_sci_reg *reg = sci_getreg(p, offset);
+
+ if (reg->size == 8)
+ iowrite8(value, p->membase + (reg->offset << p->regshift));
+ else if (reg->size == 16)
+ iowrite16(value, p->membase + (reg->offset << p->regshift));
+ else
+ WARN(1, "Invalid register access\n");
+}
+
+#define sci_in(up, offset) (up->serial_in(up, offset))
+#define sci_out(up, offset, value) (up->serial_out(up, offset, value))
+
+static int sci_probe_regmap(struct plat_sci_port *cfg)
+{
+ switch (cfg->type) {
+ case PORT_SCI:
+ cfg->regtype = SCIx_SCI_REGTYPE;
+ break;
+ case PORT_IRDA:
+ cfg->regtype = SCIx_IRDA_REGTYPE;
+ break;
+ case PORT_SCIFA:
+ cfg->regtype = SCIx_SCIFA_REGTYPE;
+ break;
+ case PORT_SCIFB:
+ cfg->regtype = SCIx_SCIFB_REGTYPE;
+ break;
+ case PORT_SCIF:
+ /*
+ * The SH-4 is a bit of a misnomer here, although that's
+ * where this particular port layout originated. This
+ * configuration (or some slight variation thereof)
+ * remains the dominant model for all SCIFs.
+ */
+ cfg->regtype = SCIx_SH4_SCIF_REGTYPE;
+ break;
+ default:
+ printk(KERN_ERR "Can't probe register map for given port\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void sci_port_enable(struct sci_port *sci_port)
+{
+ if (!sci_port->port.dev)
+ return;
+
+ pm_runtime_get_sync(sci_port->port.dev);
+
+ clk_enable(sci_port->iclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+ clk_enable(sci_port->fclk);
+}
+
+static void sci_port_disable(struct sci_port *sci_port)
+{
+ if (!sci_port->port.dev)
+ return;
+
+ clk_disable(sci_port->fclk);
+ clk_disable(sci_port->iclk);
+
+ pm_runtime_put_sync(sci_port->port.dev);
+}
+
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
#ifdef CONFIG_CONSOLE_POLL
@@ -164,223 +428,76 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
-#if defined(__H8300H__) || defined(__H8300S__)
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
- int ch = (port->mapbase - SMR0) >> 3;
-
- /* set DDR regs */
- H8300_GPIO_DDR(h8300_sci_pins[ch].port,
- h8300_sci_pins[ch].rx,
- H8300_GPIO_INPUT);
- H8300_GPIO_DDR(h8300_sci_pins[ch].port,
- h8300_sci_pins[ch].tx,
- H8300_GPIO_OUTPUT);
-
- /* tx mark output*/
- H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- if (port->mapbase == 0xA4400000) {
- __raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
- __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
- } else if (port->mapbase == 0xA4410000)
- __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- unsigned short data;
-
- if (cflag & CRTSCTS) {
- /* enable RTS/CTS */
- if (port->mapbase == 0xa4430000) { /* SCIF0 */
- /* Clear PTCR bit 9-2; enable all scif pins but sck */
- data = __raw_readw(PORT_PTCR);
- __raw_writew((data & 0xfc03), PORT_PTCR);
- } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
- /* Clear PVCR bit 9-2 */
- data = __raw_readw(PORT_PVCR);
- __raw_writew((data & 0xfc03), PORT_PVCR);
- }
- } else {
- if (port->mapbase == 0xa4430000) { /* SCIF0 */
- /* Clear PTCR bit 5-2; enable only tx and rx */
- data = __raw_readw(PORT_PTCR);
- __raw_writew((data & 0xffc3), PORT_PTCR);
- } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
- /* Clear PVCR bit 5-2 */
- data = __raw_readw(PORT_PVCR);
- __raw_writew((data & 0xffc3), PORT_PVCR);
- }
- }
-}
-#elif defined(CONFIG_CPU_SH3)
-/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- unsigned short data;
-
- /* We need to set SCPCR to enable RTS/CTS */
- data = __raw_readw(SCPCR);
- /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
- __raw_writew(data & 0x0fcf, SCPCR);
-
- if (!(cflag & CRTSCTS)) {
- /* We need to set SCPCR to enable RTS/CTS */
- data = __raw_readw(SCPCR);
- /* Clear out SCP7MD1,0, SCP4MD1,0,
- Set SCP6MD1,0 = {01} (output) */
- __raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
+ struct sci_port *s = to_sci_port(port);
+ struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
- data = __raw_readb(SCPDR);
- /* Set /RTS2 (bit6) = 0 */
- __raw_writeb(data & 0xbf, SCPDR);
+ /*
+ * Use port-specific handler if provided.
+ */
+ if (s->cfg->ops && s->cfg->ops->init_pins) {
+ s->cfg->ops->init_pins(port, cflag);
+ return;
}
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- unsigned short data;
- if (port->mapbase == 0xffe00000) {
- data = __raw_readw(PSCR);
- data &= ~0x03cf;
- if (!(cflag & CRTSCTS))
- data |= 0x0340;
+ /*
+ * For the generic path SCSPTR is necessary. Bail out if that's
+ * unavailable, too.
+ */
+ if (!reg->size)
+ return;
- __raw_writew(data, PSCR);
- }
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
- defined(CONFIG_CPU_SUBTYPE_SH7763) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786) || \
- defined(CONFIG_CPU_SUBTYPE_SHX3)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
if (!(cflag & CRTSCTS))
- __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */
+ sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */
}
-#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- if (!(cflag & CRTSCTS))
- __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */
-}
-#else
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- /* Nothing to do */
-}
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
-static int scif_txfill(struct uart_port *port)
-{
- return sci_in(port, SCTFDR) & 0xff;
-}
-
-static int scif_txroom(struct uart_port *port)
+static int sci_txfill(struct uart_port *port)
{
- return SCIF_TXROOM_MAX - scif_txfill(port);
-}
+ struct plat_sci_reg *reg;
-static int scif_rxfill(struct uart_port *port)
-{
- return sci_in(port, SCRFDR) & 0xff;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-static int scif_txfill(struct uart_port *port)
-{
- if (port->mapbase == 0xffe00000 ||
- port->mapbase == 0xffe08000)
- /* SCIF0/1*/
+ reg = sci_getreg(port, SCTFDR);
+ if (reg->size)
return sci_in(port, SCTFDR) & 0xff;
- else
- /* SCIF2 */
+
+ reg = sci_getreg(port, SCFDR);
+ if (reg->size)
return sci_in(port, SCFDR) >> 8;
-}
-static int scif_txroom(struct uart_port *port)
-{
- if (port->mapbase == 0xffe00000 ||
- port->mapbase == 0xffe08000)
- /* SCIF0/1*/
- return SCIF_TXROOM_MAX - scif_txfill(port);
- else
- /* SCIF2 */
- return SCIF2_TXROOM_MAX - scif_txfill(port);
+ return !(sci_in(port, SCxSR) & SCI_TDRE);
}
-static int scif_rxfill(struct uart_port *port)
-{
- if ((port->mapbase == 0xffe00000) ||
- (port->mapbase == 0xffe08000)) {
- /* SCIF0/1*/
- return sci_in(port, SCRFDR) & 0xff;
- } else {
- /* SCIF2 */
- return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
- }
-}
-#elif defined(CONFIG_ARCH_SH7372)
-static int scif_txfill(struct uart_port *port)
+static int sci_txroom(struct uart_port *port)
{
- if (port->type == PORT_SCIFA)
- return sci_in(port, SCFDR) >> 8;
- else
- return sci_in(port, SCTFDR);
+ return port->fifosize - sci_txfill(port);
}
-static int scif_txroom(struct uart_port *port)
+static int sci_rxfill(struct uart_port *port)
{
- return port->fifosize - scif_txfill(port);
-}
+ struct plat_sci_reg *reg;
-static int scif_rxfill(struct uart_port *port)
-{
- if (port->type == PORT_SCIFA)
- return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
- else
- return sci_in(port, SCRFDR);
-}
-#else
-static int scif_txfill(struct uart_port *port)
-{
- return sci_in(port, SCFDR) >> 8;
-}
+ reg = sci_getreg(port, SCRFDR);
+ if (reg->size)
+ return sci_in(port, SCRFDR) & 0xff;
-static int scif_txroom(struct uart_port *port)
-{
- return SCIF_TXROOM_MAX - scif_txfill(port);
-}
+ reg = sci_getreg(port, SCFDR);
+ if (reg->size)
+ return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1);
-static int scif_rxfill(struct uart_port *port)
-{
- return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
+ return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
-#endif
-static int sci_txfill(struct uart_port *port)
+/*
+ * SCI helper for checking the state of the muxed port/RXD pins.
+ */
+static inline int sci_rxd_in(struct uart_port *port)
{
- return !(sci_in(port, SCxSR) & SCI_TDRE);
-}
+ struct sci_port *s = to_sci_port(port);
-static int sci_txroom(struct uart_port *port)
-{
- return !sci_txfill(port);
-}
+ if (s->cfg->port_reg <= 0)
+ return 1;
-static int sci_rxfill(struct uart_port *port)
-{
- return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
+ return !!__raw_readb(s->cfg->port_reg);
}
/* ********************************************************************** *
@@ -406,10 +523,7 @@ static void sci_transmit_chars(struct uart_port *port)
return;
}
- if (port->type == PORT_SCI)
- count = sci_txroom(port);
- else
- count = scif_txroom(port);
+ count = sci_txroom(port);
do {
unsigned char c;
@@ -464,13 +578,8 @@ static void sci_receive_chars(struct uart_port *port)
return;
while (1) {
- if (port->type == PORT_SCI)
- count = sci_rxfill(port);
- else
- count = scif_rxfill(port);
-
/* Don't copy more bytes than there is room for in the buffer */
- count = tty_buffer_request_room(tty, count);
+ count = tty_buffer_request_room(tty, sci_rxfill(port));
/* If for any reason we can't copy more data, we're done! */
if (count == 0)
@@ -561,8 +670,7 @@ static void sci_break_timer(unsigned long data)
{
struct sci_port *port = (struct sci_port *)data;
- if (port->enable)
- port->enable(&port->port);
+ sci_port_enable(port);
if (sci_rxd_in(&port->port) == 0) {
port->break_flag = 1;
@@ -574,8 +682,7 @@ static void sci_break_timer(unsigned long data)
} else
port->break_flag = 0;
- if (port->disable)
- port->disable(&port->port);
+ sci_port_disable(port);
}
static int sci_handle_errors(struct uart_port *port)
@@ -583,13 +690,19 @@ static int sci_handle_errors(struct uart_port *port)
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
struct tty_struct *tty = port->state->port.tty;
+ struct sci_port *s = to_sci_port(port);
- if (status & SCxSR_ORER(port)) {
- /* overrun error */
- if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
- copied++;
+ /*
+ * Handle overruns, if supported.
+ */
+ if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
+ if (status & (1 << s->cfg->overrun_bit)) {
+ /* overrun error */
+ if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
+ copied++;
- dev_notice(port->dev, "overrun error");
+ dev_notice(port->dev, "overrun error");
+ }
}
if (status & SCxSR_FER(port)) {
@@ -637,12 +750,15 @@ static int sci_handle_errors(struct uart_port *port)
static int sci_handle_fifo_overrun(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
+ struct sci_port *s = to_sci_port(port);
+ struct plat_sci_reg *reg;
int copied = 0;
- if (port->type != PORT_SCIF)
+ reg = sci_getreg(port, SCLSR);
+ if (!reg->size)
return 0;
- if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) {
+ if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
sci_out(port, SCLSR, 0);
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
@@ -840,74 +956,102 @@ static int sci_notifier(struct notifier_block *self,
return NOTIFY_OK;
}
-static void sci_clk_enable(struct uart_port *port)
-{
- struct sci_port *sci_port = to_sci_port(port);
-
- pm_runtime_get_sync(port->dev);
+static struct sci_irq_desc {
+ const char *desc;
+ irq_handler_t handler;
+} sci_irq_desc[] = {
+ /*
+ * Split out handlers, the default case.
+ */
+ [SCIx_ERI_IRQ] = {
+ .desc = "rx err",
+ .handler = sci_er_interrupt,
+ },
- clk_enable(sci_port->iclk);
- sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
- clk_enable(sci_port->fclk);
-}
+ [SCIx_RXI_IRQ] = {
+ .desc = "rx full",
+ .handler = sci_rx_interrupt,
+ },
-static void sci_clk_disable(struct uart_port *port)
-{
- struct sci_port *sci_port = to_sci_port(port);
+ [SCIx_TXI_IRQ] = {
+ .desc = "tx empty",
+ .handler = sci_tx_interrupt,
+ },
- clk_disable(sci_port->fclk);
- clk_disable(sci_port->iclk);
+ [SCIx_BRI_IRQ] = {
+ .desc = "break",
+ .handler = sci_br_interrupt,
+ },
- pm_runtime_put_sync(port->dev);
-}
+ /*
+ * Special muxed handler.
+ */
+ [SCIx_MUX_IRQ] = {
+ .desc = "mux",
+ .handler = sci_mpxed_interrupt,
+ },
+};
static int sci_request_irq(struct sci_port *port)
{
- int i;
- irqreturn_t (*handlers[4])(int irq, void *ptr) = {
- sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt,
- sci_br_interrupt,
- };
- const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
- "SCI Transmit Data Empty", "SCI Break" };
-
- if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
- if (unlikely(!port->cfg->irqs[0]))
- return -ENODEV;
-
- if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
- IRQF_DISABLED, "sci", port)) {
- dev_err(port->port.dev, "Can't allocate IRQ\n");
- return -ENODEV;
+ struct uart_port *up = &port->port;
+ int i, j, ret = 0;
+
+ for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
+ struct sci_irq_desc *desc;
+ unsigned int irq;
+
+ if (SCIx_IRQ_IS_MUXED(port)) {
+ i = SCIx_MUX_IRQ;
+ irq = up->irq;
+ } else
+ irq = port->cfg->irqs[i];
+
+ desc = sci_irq_desc + i;
+ port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
+ dev_name(up->dev), desc->desc);
+ if (!port->irqstr[j]) {
+ dev_err(up->dev, "Failed to allocate %s IRQ string\n",
+ desc->desc);
+ goto out_nomem;
}
- } else {
- for (i = 0; i < ARRAY_SIZE(handlers); i++) {
- if (unlikely(!port->cfg->irqs[i]))
- continue;
-
- if (request_irq(port->cfg->irqs[i], handlers[i],
- IRQF_DISABLED, desc[i], port)) {
- dev_err(port->port.dev, "Can't allocate IRQ\n");
- return -ENODEV;
- }
+
+ ret = request_irq(irq, desc->handler, up->irqflags,
+ port->irqstr[j], port);
+ if (unlikely(ret)) {
+ dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
+ goto out_noirq;
}
}
return 0;
+
+out_noirq:
+ while (--i >= 0)
+ free_irq(port->cfg->irqs[i], port);
+
+out_nomem:
+ while (--j >= 0)
+ kfree(port->irqstr[j]);
+
+ return ret;
}
static void sci_free_irq(struct sci_port *port)
{
int i;
- if (port->cfg->irqs[0] == port->cfg->irqs[1])
- free_irq(port->cfg->irqs[0], port);
- else {
- for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
- if (!port->cfg->irqs[i])
- continue;
+ /*
+ * Intentionally in reverse order so we iterate over the muxed
+ * IRQ first.
+ */
+ for (i = 0; i < SCIx_NR_IRQS; i++) {
+ free_irq(port->cfg->irqs[i], port);
+ kfree(port->irqstr[i]);
- free_irq(port->cfg->irqs[i], port);
+ if (SCIx_IRQ_IS_MUXED(port)) {
+ /* If there's only one IRQ, we're done. */
+ return;
}
}
}
@@ -915,7 +1059,7 @@ static void sci_free_irq(struct sci_port *port)
static unsigned int sci_tx_empty(struct uart_port *port)
{
unsigned short status = sci_in(port, SCxSR);
- unsigned short in_tx_fifo = scif_txfill(port);
+ unsigned short in_tx_fifo = sci_txfill(port);
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
@@ -1438,8 +1582,7 @@ static int sci_startup(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
- if (s->enable)
- s->enable(port);
+ sci_port_enable(s);
ret = sci_request_irq(s);
if (unlikely(ret < 0))
@@ -1465,8 +1608,7 @@ static void sci_shutdown(struct uart_port *port)
sci_free_dma(port);
sci_free_irq(s);
- if (s->disable)
- s->disable(port);
+ sci_port_disable(s);
}
static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
@@ -1513,8 +1655,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
if (likely(baud && port->uartclk))
t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);
- if (s->enable)
- s->enable(port);
+ sci_port_enable(s);
do {
status = sci_in(port, SCxSR);
@@ -1584,8 +1725,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
- if (s->disable)
- s->disable(port);
+ sci_port_disable(s);
}
static const char *sci_type(struct uart_port *port)
@@ -1726,6 +1866,7 @@ static int __devinit sci_init_single(struct platform_device *dev,
struct plat_sci_port *p)
{
struct uart_port *port = &sci_port->port;
+ int ret;
port->ops = &sci_uart_ops;
port->iotype = UPIO_MEM;
@@ -1746,6 +1887,12 @@ static int __devinit sci_init_single(struct platform_device *dev,
break;
}
+ if (p->regtype == SCIx_PROBE_REGTYPE) {
+ ret = sci_probe_regmap(p);
+ if (unlikely(ret))
+ return ret;
+ }
+
if (dev) {
sci_port->iclk = clk_get(&dev->dev, "sci_ick");
if (IS_ERR(sci_port->iclk)) {
@@ -1764,8 +1911,6 @@ static int __devinit sci_init_single(struct platform_device *dev,
if (IS_ERR(sci_port->fclk))
sci_port->fclk = NULL;
- sci_port->enable = sci_clk_enable;
- sci_port->disable = sci_clk_disable;
port->dev = &dev->dev;
pm_runtime_enable(&dev->dev);
@@ -1775,20 +1920,51 @@ static int __devinit sci_init_single(struct platform_device *dev,
sci_port->break_timer.function = sci_break_timer;
init_timer(&sci_port->break_timer);
+ /*
+ * Establish some sensible defaults for the error detection.
+ */
+ if (!p->error_mask)
+ p->error_mask = (p->type == PORT_SCI) ?
+ SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;
+
+ /*
+ * Establish sensible defaults for the overrun detection, unless
+ * the part has explicitly disabled support for it.
+ */
+ if (p->overrun_bit != SCIx_NOT_SUPPORTED) {
+ if (p->type == PORT_SCI)
+ p->overrun_bit = 5;
+ else if (p->scbrr_algo_id == SCBRR_ALGO_4)
+ p->overrun_bit = 9;
+ else
+ p->overrun_bit = 0;
+
+ /*
+ * Make the error mask inclusive of overrun detection, if
+ * supported.
+ */
+ p->error_mask |= (1 << p->overrun_bit);
+ }
+
sci_port->cfg = p;
port->mapbase = p->mapbase;
port->type = p->type;
port->flags = p->flags;
+ port->regshift = p->regshift;
/*
- * The UART port needs an IRQ value, so we peg this to the TX IRQ
+ * The UART port needs an IRQ value, so we peg this to the RX IRQ
* for the multi-IRQ ports, which is where we are primarily
* concerned with the shutdown path synchronization.
*
* For the muxed case there's nothing more to do.
*/
port->irq = p->irqs[SCIx_RXI_IRQ];
+ port->irqflags = IRQF_DISABLED;
+
+ port->serial_in = sci_serial_in;
+ port->serial_out = sci_serial_out;
if (p->dma_dev)
dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
@@ -1814,8 +1990,7 @@ static void serial_console_write(struct console *co, const char *s,
struct uart_port *port = &sci_port->port;
unsigned short bits;
- if (sci_port->enable)
- sci_port->enable(port);
+ sci_port_enable(sci_port);
uart_console_write(port, s, count, serial_console_putchar);
@@ -1824,8 +1999,7 @@ static void serial_console_write(struct console *co, const char *s,
while ((sci_in(port, SCxSR) & bits) != bits)
cpu_relax();
- if (sci_port->disable)
- sci_port->disable(port);
+ sci_port_disable(sci_port);
}
static int __devinit serial_console_setup(struct console *co, char *options)
@@ -1857,20 +2031,13 @@ static int __devinit serial_console_setup(struct console *co, char *options)
if (unlikely(ret != 0))
return ret;
- if (sci_port->enable)
- sci_port->enable(port);
+ sci_port_enable(sci_port);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
- ret = uart_set_options(port, co, baud, parity, bits, flow);
-#if defined(__H8300H__) || defined(__H8300S__)
- /* disable rx interrupt */
- if (ret == 0)
- sci_stop_rx(port);
-#endif
/* TODO: disable clock */
- return ret;
+ return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct console serial_console = {
@@ -2081,3 +2248,5 @@ module_exit(sci_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");
+MODULE_AUTHOR("Paul Mundt");
+MODULE_DESCRIPTION("SuperH SCI(F) serial driver");
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index b04d937c911..e9bed038aa1 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -2,169 +2,14 @@
#include <linux/io.h>
#include <linux/gpio.h>
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
-#include <asm/regs306x.h>
-#endif
-#if defined(CONFIG_H8S2678)
-#include <asm/regs267x.h>
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
- defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7708) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
-# define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */
-# define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
-# define SCIF0 0xA4400000
-# define SCIF2 0xA4410000
-# define SCPCR 0xA4000116
-# define SCPDR 0xA4000136
-#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH73A0) || \
- defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
-# define PORT_PTCR 0xA405011EUL
-# define PORT_PVCR 0xA4050122UL
-# define SCIF_ORER 0x0200 /* overrun error bit */
-#elif defined(CONFIG_SH_RTS7751R2D)
-# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
-# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
- defined(CONFIG_CPU_SUBTYPE_SH7091) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751R)
-# define SCSPTR1 0xffe0001c /* 8 bit SCI */
-# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
-# define SCSPTR0 0xfe600024 /* 16 bit SCIF */
-# define SCSPTR1 0xfe610024 /* 16 bit SCIF */
-# define SCSPTR2 0xfe620024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-# define SCSPTR0 0xA4400000 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-# define PACR 0xa4050100
-# define PBCR 0xa4050102
-#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
-# define SCSPTR0 0xffe00010 /* 16 bit SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
-# define PADR 0xA4050120
-# define PSDR 0xA405013e
-# define PWDR 0xA4050166
-# define PSCR 0xA405011E
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
-# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
-# define SCSPTR0 SCPDR0
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
-# define SCSPTR0 0xa4050160
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
-# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
-# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
-#elif defined(CONFIG_H8S2678)
-# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
-# define SCSPTR0 0xfe4b0020
-# define SCIF_ORER 0x0001
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
-# define SCSPTR0 0xff923020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
-# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* Overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
-# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* Overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
- defined(CONFIG_CPU_SUBTYPE_SH7203) || \
- defined(CONFIG_CPU_SUBTYPE_SH7206) || \
- defined(CONFIG_CPU_SUBTYPE_SH7263)
-# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
-# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
-# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* Overrun error bit */
-#else
-# error CPU subtype not defined
-#endif
-
-/* SCxSR SCI */
-#define SCI_TDRE 0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_RDRF 0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_ORER 0x20 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_FER 0x10 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_PER 0x08 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_TEND 0x04 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/* SCI_MPB 0x02 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/* SCI_MPBT 0x01 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-
-#define SCI_ERRORS ( SCI_PER | SCI_FER | SCI_ORER)
-
-/* SCxSR SCIF */
-#define SCIF_ER 0x0080 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_TEND 0x0040 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_TDFE 0x0020 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_BRK 0x0010 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_FER 0x0008 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_PER 0x0004 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_RDF 0x0002 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_DR 0x0001 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
- defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH73A0) || \
- defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
-# define SCIF_ORER 0x0200
-# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER)
-# define SCIF_RFDC_MASK 0x007f
-# define SCIF_TXROOM_MAX 64
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK )
-# define SCIF_RFDC_MASK 0x007f
-# define SCIF_TXROOM_MAX 64
-/* SH7763 SCIF2 support */
-# define SCIF2_RFDC_MASK 0x001f
-# define SCIF2_TXROOM_MAX 16
-#else
-# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
-# define SCIF_RFDC_MASK 0x001f
-# define SCIF_TXROOM_MAX 16
-#endif
-
-#ifndef SCIF_ORER
-#define SCIF_ORER 0x0000
-#endif
-
#define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND)
-#define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS)
#define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF)
#define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE)
#define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER)
#define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER)
#define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK)
-#define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER)
+
+#define SCxSR_ERRORS(port) (to_sci_port(port)->cfg->error_mask)
#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
@@ -191,278 +36,3 @@
#define SCI_MAJOR 204
#define SCI_MINOR_START 8
-
-#define SCI_IN(size, offset) \
- if ((size) == 8) { \
- return ioread8(port->membase + (offset)); \
- } else { \
- return ioread16(port->membase + (offset)); \
- }
-#define SCI_OUT(size, offset, value) \
- if ((size) == 8) { \
- iowrite8(value, port->membase + (offset)); \
- } else if ((size) == 16) { \
- iowrite16(value, port->membase + (offset)); \
- }
-
-#define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\
- static inline unsigned int sci_##name##_in(struct uart_port *port) \
- { \
- if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
- SCI_IN(scif_size, scif_offset) \
- } else { /* PORT_SCI or PORT_SCIFA */ \
- SCI_IN(sci_size, sci_offset); \
- } \
- } \
- static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
- { \
- if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
- SCI_OUT(scif_size, scif_offset, value) \
- } else { /* PORT_SCI or PORT_SCIFA */ \
- SCI_OUT(sci_size, sci_offset, value); \
- } \
- }
-
-#ifdef CONFIG_H8300
-/* h8300 don't have SCIF */
-#define CPU_SCIF_FNS(name) \
- static inline unsigned int sci_##name##_in(struct uart_port *port) \
- { \
- return 0; \
- } \
- static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
- { \
- }
-#else
-#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
- static inline unsigned int sci_##name##_in(struct uart_port *port) \
- { \
- SCI_IN(scif_size, scif_offset); \
- } \
- static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
- { \
- SCI_OUT(scif_size, scif_offset, value); \
- }
-#endif
-
-#define CPU_SCI_FNS(name, sci_offset, sci_size) \
- static inline unsigned int sci_##name##_in(struct uart_port* port) \
- { \
- SCI_IN(sci_size, sci_offset); \
- } \
- static inline void sci_##name##_out(struct uart_port* port, unsigned int value) \
- { \
- SCI_OUT(sci_size, sci_offset, value); \
- }
-
-#if defined(CONFIG_CPU_SH3) || \
- defined(CONFIG_ARCH_SH73A0) || \
- defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
-#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
- defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH7367)
-#define SCIF_FNS(name, scif_offset, scif_size) \
- CPU_SCIF_FNS(name, scif_offset, scif_size)
-#elif defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372) || \
- defined(CONFIG_ARCH_SH73A0)
-#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \
- CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size)
-#define SCIF_FNS(name, scif_offset, scif_size) \
- CPU_SCIF_FNS(name, scif_offset, scif_size)
-#else
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh3_scif_offset, sh3_scif_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh3_scif_offset, sh3_scif_size)
-#endif
-#elif defined(__H8300H__) || defined(__H8300S__)
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
- defined(CONFIG_CPU_SUBTYPE_SH7724)
- #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size)
- #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
-#else
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
- defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH7367)
-
-SCIF_FNS(SCSMR, 0x00, 16)
-SCIF_FNS(SCBRR, 0x04, 8)
-SCIF_FNS(SCSCR, 0x08, 16)
-SCIF_FNS(SCxSR, 0x14, 16)
-SCIF_FNS(SCFCR, 0x18, 16)
-SCIF_FNS(SCFDR, 0x1c, 16)
-SCIF_FNS(SCxTDR, 0x20, 8)
-SCIF_FNS(SCxRDR, 0x24, 8)
-SCIF_FNS(SCLSR, 0x00, 0)
-#elif defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372) || \
- defined(CONFIG_ARCH_SH73A0)
-SCIF_FNS(SCSMR, 0x00, 16)
-SCIF_FNS(SCBRR, 0x04, 8)
-SCIF_FNS(SCSCR, 0x08, 16)
-SCIF_FNS(SCTDSR, 0x0c, 16)
-SCIF_FNS(SCFER, 0x10, 16)
-SCIF_FNS(SCxSR, 0x14, 16)
-SCIF_FNS(SCFCR, 0x18, 16)
-SCIF_FNS(SCFDR, 0x1c, 16)
-SCIF_FNS(SCTFDR, 0x38, 16)
-SCIF_FNS(SCRFDR, 0x3c, 16)
-SCIx_FNS(SCxTDR, 0x20, 8, 0x40, 8)
-SCIx_FNS(SCxRDR, 0x24, 8, 0x60, 8)
-SCIF_FNS(SCLSR, 0x00, 0)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
- defined(CONFIG_CPU_SUBTYPE_SH7724)
-SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16)
-SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8)
-SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
-SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8)
-SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16)
-SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8)
-SCIx_FNS(SCSPTR, 0, 0, 0, 0)
-SCIF_FNS(SCFCR, 0x18, 16)
-SCIF_FNS(SCFDR, 0x1c, 16)
-SCIF_FNS(SCLSR, 0x24, 16)
-#else
-/* reg SCI/SH3 SCI/SH4 SCIF/SH3 SCIF/SH4 SCI/H8*/
-/* name off sz off sz off sz off sz off sz*/
-SCIx_FNS(SCSMR, 0x00, 8, 0x00, 8, 0x00, 8, 0x00, 16, 0x00, 8)
-SCIx_FNS(SCBRR, 0x02, 8, 0x04, 8, 0x02, 8, 0x04, 8, 0x01, 8)
-SCIx_FNS(SCSCR, 0x04, 8, 0x08, 8, 0x04, 8, 0x08, 16, 0x02, 8)
-SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8, 0x03, 8)
-SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8)
-SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8)
-SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16)
-#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
-SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
-SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
-SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
-SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
-SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-SCIF_FNS(SCFDR, 0, 0, 0x1C, 16)
-SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
-SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
-SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
-SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
-#else
-SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
-#if defined(CONFIG_CPU_SUBTYPE_SH7722)
-SCIF_FNS(SCSPTR, 0, 0, 0, 0)
-#else
-SCIF_FNS(SCSPTR, 0, 0, 0x20, 16)
-#endif
-SCIF_FNS(SCLSR, 0, 0, 0x24, 16)
-#endif
-#endif
-#define sci_in(port, reg) sci_##reg##_in(port)
-#define sci_out(port, reg, value) sci_##reg##_out(port, value)
-
-/* H8/300 series SCI pins assignment */
-#if defined(__H8300H__) || defined(__H8300S__)
-static const struct __attribute__((packed)) {
- int port; /* GPIO port no */
- unsigned short rx,tx; /* GPIO bit no */
-} h8300_sci_pins[] = {
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
- { /* SCI0 */
- .port = H8300_GPIO_P9,
- .rx = H8300_GPIO_B2,
- .tx = H8300_GPIO_B0,
- },
- { /* SCI1 */
- .port = H8300_GPIO_P9,
- .rx = H8300_GPIO_B3,
- .tx = H8300_GPIO_B1,
- },
- { /* SCI2 */
- .port = H8300_GPIO_PB,
- .rx = H8300_GPIO_B7,
- .tx = H8300_GPIO_B6,
- }
-#elif defined(CONFIG_H8S2678)
- { /* SCI0 */
- .port = H8300_GPIO_P3,
- .rx = H8300_GPIO_B2,
- .tx = H8300_GPIO_B0,
- },
- { /* SCI1 */
- .port = H8300_GPIO_P3,
- .rx = H8300_GPIO_B3,
- .tx = H8300_GPIO_B1,
- },
- { /* SCI2 */
- .port = H8300_GPIO_P5,
- .rx = H8300_GPIO_B1,
- .tx = H8300_GPIO_B0,
- }
-#endif
-};
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
- defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7708) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
-static inline int sci_rxd_in(struct uart_port *port)
-{
- if (port->mapbase == 0xfffffe80)
- return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
- return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
- defined(CONFIG_CPU_SUBTYPE_SH7091)
-static inline int sci_rxd_in(struct uart_port *port)
-{
- if (port->mapbase == 0xffe00000)
- return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
- return 1;
-}
-#elif defined(__H8300H__) || defined(__H8300S__)
-static inline int sci_rxd_in(struct uart_port *port)
-{
- int ch = (port->mapbase - SMR0) >> 3;
- return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0;
-}
-#else /* default case for non-SCI processors */
-static inline int sci_rxd_in(struct uart_port *port)
-{
- return 1;
-}
-#endif
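
For reference, a minimal sketch of what replaces the deleted #ifdef maze from the platform side: the error bits are now carried in the per-port configuration that the new SCxSR_ERRORS() dereferences. The structure usage and field name below are assumptions inferred from the macro, not taken from this diff.

/* sketch only: per-port error mask supplied via platform data (assumed) */
static struct plat_sci_port scif0_platform_data = {
	.mapbase	= 0xffe00000,
	.type		= PORT_SCIF,
	/* assumed field; SCxSR_ERRORS(port) resolves to cfg->error_mask */
	.error_mask	= SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK,
};
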
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 1e54b8b7f69..278aeaa9250 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -335,6 +335,13 @@ config BACKLIGHT_PCF50633
If you have a backlight driven by a NXP PCF50633 MFD, say Y here to
enable its driver.
+config BACKLIGHT_AAT2870
+ tristate "AnalogicTech AAT2870 Backlight"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_AAT2870_CORE
+ help
+ If you have an AnalogicTech AAT2870, say Y to enable the
+ backlight driver.
+
endif # BACKLIGHT_CLASS_DEVICE
endif # BACKLIGHT_LCD_SUPPORT
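
A typical configuration fragment for building the new driver; the dependencies follow directly from the Kconfig entry above:

CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_MFD_AAT2870_CORE=y
CONFIG_BACKLIGHT_AAT2870=m
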
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index bf1dd92b752..fdd1fc4b277 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -38,4 +38,5 @@ obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
+obj-$(CONFIG_BACKLIGHT_AAT2870) += aat2870_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
new file mode 100644
index 00000000000..331f1ef1dad
--- /dev/null
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -0,0 +1,246 @@
+/*
+ * linux/drivers/video/backlight/aat2870_bl.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/mfd/aat2870.h>
+
+struct aat2870_bl_driver_data {
+ struct platform_device *pdev;
+ struct backlight_device *bd;
+
+ int channels;
+ int max_current;
+ int brightness; /* current brightness */
+};
+
+static inline int aat2870_brightness(struct aat2870_bl_driver_data *aat2870_bl,
+ int brightness)
+{
+ struct backlight_device *bd = aat2870_bl->bd;
+ int val;
+
+ val = brightness * (aat2870_bl->max_current - 1);
+ val /= bd->props.max_brightness;
+
+ return val;
+}
+
+static inline int aat2870_bl_enable(struct aat2870_bl_driver_data *aat2870_bl)
+{
+ struct aat2870_data *aat2870
+ = dev_get_drvdata(aat2870_bl->pdev->dev.parent);
+
+ return aat2870->write(aat2870, AAT2870_BL_CH_EN,
+ (u8)aat2870_bl->channels);
+}
+
+static inline int aat2870_bl_disable(struct aat2870_bl_driver_data *aat2870_bl)
+{
+ struct aat2870_data *aat2870
+ = dev_get_drvdata(aat2870_bl->pdev->dev.parent);
+
+ return aat2870->write(aat2870, AAT2870_BL_CH_EN, 0x0);
+}
+
+static int aat2870_bl_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static int aat2870_bl_update_status(struct backlight_device *bd)
+{
+ struct aat2870_bl_driver_data *aat2870_bl = dev_get_drvdata(&bd->dev);
+ struct aat2870_data *aat2870 =
+ dev_get_drvdata(aat2870_bl->pdev->dev.parent);
+ int brightness = bd->props.brightness;
+ int ret;
+
+ if ((brightness < 0) || (bd->props.max_brightness < brightness)) {
+ dev_err(&bd->dev, "invalid brightness, %d\n", brightness);
+ return -EINVAL;
+ }
+
+ dev_dbg(&bd->dev, "brightness=%d, power=%d, state=%d\n",
+ bd->props.brightness, bd->props.power, bd->props.state);
+
+ if ((bd->props.power != FB_BLANK_UNBLANK) ||
+ (bd->props.state & BL_CORE_FBBLANK) ||
+ (bd->props.state & BL_CORE_SUSPENDED))
+ brightness = 0;
+
+ ret = aat2870->write(aat2870, AAT2870_BLM,
+ (u8)aat2870_brightness(aat2870_bl, brightness));
+ if (ret < 0)
+ return ret;
+
+ if (brightness == 0) {
+ ret = aat2870_bl_disable(aat2870_bl);
+ if (ret < 0)
+ return ret;
+ } else if (aat2870_bl->brightness == 0) {
+ ret = aat2870_bl_enable(aat2870_bl);
+ if (ret < 0)
+ return ret;
+ }
+
+ aat2870_bl->brightness = brightness;
+
+ return 0;
+}
+
+static int aat2870_bl_check_fb(struct backlight_device *bd, struct fb_info *fi)
+{
+ return 1;
+}
+
+static const struct backlight_ops aat2870_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = aat2870_bl_get_brightness,
+ .update_status = aat2870_bl_update_status,
+ .check_fb = aat2870_bl_check_fb,
+};
+
+static int aat2870_bl_probe(struct platform_device *pdev)
+{
+ struct aat2870_bl_platform_data *pdata = pdev->dev.platform_data;
+ struct aat2870_bl_driver_data *aat2870_bl;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ int ret = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (pdev->id != AAT2870_ID_BL) {
+ dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ aat2870_bl = kzalloc(sizeof(struct aat2870_bl_driver_data), GFP_KERNEL);
+ if (!aat2870_bl) {
+ dev_err(&pdev->dev,
+ "Failed to allocate memory for aat2870 backlight\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+
+ props.type = BACKLIGHT_RAW;
+ bd = backlight_device_register("aat2870-backlight", &pdev->dev,
+ aat2870_bl, &aat2870_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ dev_err(&pdev->dev,
+ "Failed allocate memory for backlight device\n");
+ ret = PTR_ERR(bd);
+ goto out_kfree;
+ }
+
+ aat2870_bl->pdev = pdev;
+ platform_set_drvdata(pdev, aat2870_bl);
+
+ aat2870_bl->bd = bd;
+
+ if (pdata->channels > 0)
+ aat2870_bl->channels = pdata->channels;
+ else
+ aat2870_bl->channels = AAT2870_BL_CH_ALL;
+
+ if (pdata->max_current > 0)
+ aat2870_bl->max_current = pdata->max_current;
+ else
+ aat2870_bl->max_current = AAT2870_CURRENT_27_9;
+
+ if (pdata->max_brightness > 0)
+ bd->props.max_brightness = pdata->max_brightness;
+ else
+ bd->props.max_brightness = 255;
+
+ aat2870_bl->brightness = 0;
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.brightness = bd->props.max_brightness;
+
+ ret = aat2870_bl_update_status(bd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize\n");
+ goto out_bl_dev_unregister;
+ }
+
+ return 0;
+
+out_bl_dev_unregister:
+ backlight_device_unregister(bd);
+out_kfree:
+ kfree(aat2870_bl);
+out:
+ return ret;
+}
+
+static int aat2870_bl_remove(struct platform_device *pdev)
+{
+ struct aat2870_bl_driver_data *aat2870_bl = platform_get_drvdata(pdev);
+ struct backlight_device *bd = aat2870_bl->bd;
+
+ bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.brightness = 0;
+ backlight_update_status(bd);
+
+ backlight_device_unregister(bd);
+ kfree(aat2870_bl);
+
+ return 0;
+}
+
+static struct platform_driver aat2870_bl_driver = {
+ .driver = {
+ .name = "aat2870-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = aat2870_bl_probe,
+ .remove = aat2870_bl_remove,
+};
+
+static int __init aat2870_bl_init(void)
+{
+ return platform_driver_register(&aat2870_bl_driver);
+}
+subsys_initcall(aat2870_bl_init);
+
+static void __exit aat2870_bl_exit(void)
+{
+ platform_driver_unregister(&aat2870_bl_driver);
+}
+module_exit(aat2870_bl_exit);
+
+MODULE_DESCRIPTION("AnalogicTech AAT2870 Backlight");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index fdd5d4ae437..4e888ac09b3 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -504,14 +504,18 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev)
return 0;
r = omapdss_dsi_display_enable(dssdev);
- if (r)
- goto err;
+ if (r) {
+ dev_err(&dssdev->dev, "failed to enable DSI\n");
+ goto err1;
+ }
omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
r = _taal_enable_te(dssdev, true);
- if (r)
- goto err;
+ if (r) {
+ dev_err(&dssdev->dev, "failed to re-enable TE");
+ goto err2;
+ }
enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
@@ -521,13 +525,15 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev)
return 0;
-err:
- dev_err(&dssdev->dev, "exit ULPS failed");
- r = taal_panel_reset(dssdev);
-
- enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
- td->ulps_enabled = false;
+err2:
+ dev_err(&dssdev->dev, "failed to exit ULPS");
+ r = taal_panel_reset(dssdev);
+ if (!r) {
+ enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
+ td->ulps_enabled = false;
+ }
+err1:
taal_queue_ulps_work(dssdev);
return r;
@@ -1241,11 +1247,8 @@ static void taal_power_off(struct omap_dss_device *dssdev)
int r;
r = taal_dcs_write_0(td, DCS_DISPLAY_OFF);
- if (!r) {
+ if (!r)
r = taal_sleep_in(td);
- /* HACK: wait a bit so that the message goes through */
- msleep(10);
- }
if (r) {
dev_err(&dssdev->dev,
@@ -1317,8 +1320,11 @@ static void taal_disable(struct omap_dss_device *dssdev)
dsi_bus_lock(dssdev);
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- taal_wake_up(dssdev);
- taal_power_off(dssdev);
+ int r;
+
+ r = taal_wake_up(dssdev);
+ if (!r)
+ taal_power_off(dssdev);
}
dsi_bus_unlock(dssdev);
@@ -1897,20 +1903,6 @@ err:
mutex_unlock(&td->lock);
}
-static int taal_set_update_mode(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode mode)
-{
- if (mode != OMAP_DSS_UPDATE_MANUAL)
- return -EINVAL;
- return 0;
-}
-
-static enum omap_dss_update_mode taal_get_update_mode(
- struct omap_dss_device *dssdev)
-{
- return OMAP_DSS_UPDATE_MANUAL;
-}
-
static struct omap_dss_driver taal_driver = {
.probe = taal_probe,
.remove = __exit_p(taal_remove),
@@ -1920,9 +1912,6 @@ static struct omap_dss_driver taal_driver = {
.suspend = taal_suspend,
.resume = taal_resume,
- .set_update_mode = taal_set_update_mode,
- .get_update_mode = taal_get_update_mode,
-
.update = taal_update,
.sync = taal_sync,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 6b3e2da1141..0d12524db14 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -117,18 +117,6 @@ config OMAP2_DSS_MIN_FCK_PER_PCK
Max FCK is 173MHz, so this doesn't work if your PCK
is very high.
-config OMAP2_DSS_SLEEP_BEFORE_RESET
- bool "Sleep 50ms before DSS reset"
- default y
- help
- For some unknown reason we may get SYNC_LOST errors from the display
- subsystem at initialization time if we don't sleep before resetting
- the DSS. See the source (dss.c) for more comments.
-
- However, 50ms is quite long time to sleep, and with some
- configurations the SYNC_LOST may never happen, so the sleep can
- be disabled here.
-
config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
bool "Sleep 20ms after VENC reset"
default y
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 3da426719dd..76821fefce9 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -183,8 +183,11 @@ static int omap_dss_probe(struct platform_device *pdev)
goto err_dss;
}
- /* keep clocks enabled to prevent context saves/restores during init */
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dispc_init_platform_driver();
+ if (r) {
+ DSSERR("Failed to initialize dispc platform driver\n");
+ goto err_dispc;
+ }
r = rfbi_init_platform_driver();
if (r) {
@@ -192,12 +195,6 @@ static int omap_dss_probe(struct platform_device *pdev)
goto err_rfbi;
}
- r = dispc_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize dispc platform driver\n");
- goto err_dispc;
- }
-
r = venc_init_platform_driver();
if (r) {
DSSERR("Failed to initialize venc platform driver\n");
@@ -238,8 +235,6 @@ static int omap_dss_probe(struct platform_device *pdev)
pdata->default_device = dssdev;
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
-
return 0;
err_register:
@@ -268,11 +263,11 @@ static int omap_dss_remove(struct platform_device *pdev)
dss_uninitialize_debugfs();
+ hdmi_uninit_platform_driver();
+ dsi_uninit_platform_driver();
venc_uninit_platform_driver();
- dispc_uninit_platform_driver();
rfbi_uninit_platform_driver();
- dsi_uninit_platform_driver();
- hdmi_uninit_platform_driver();
+ dispc_uninit_platform_driver();
dss_uninit_platform_driver();
dss_uninit_overlays(pdev);
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 7a9a2e7d968..0f3961a1ce2 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -33,6 +33,8 @@
#include <linux/workqueue.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <plat/sram.h>
#include <plat/clock.h>
@@ -77,6 +79,12 @@ struct dispc_v_coef {
s8 vc00;
};
+enum omap_burst_size {
+ BURST_SIZE_X2 = 0,
+ BURST_SIZE_X4 = 1,
+ BURST_SIZE_X8 = 2,
+};
+
#define REG_GET(idx, start, end) \
FLD_GET(dispc_read_reg(idx), start, end)
@@ -92,7 +100,11 @@ struct dispc_irq_stats {
static struct {
struct platform_device *pdev;
void __iomem *base;
+
+ int ctx_loss_cnt;
+
int irq;
+ struct clk *dss_clk;
u32 fifo_size[3];
@@ -102,6 +114,7 @@ static struct {
u32 error_irqs;
struct work_struct error_work;
+ bool ctx_valid;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -134,18 +147,34 @@ static inline u32 dispc_read_reg(const u16 idx)
return __raw_readl(dispc.base + idx);
}
+static int dispc_get_ctx_loss_count(void)
+{
+ struct device *dev = &dispc.pdev->dev;
+ struct omap_display_platform_data *pdata = dev->platform_data;
+ struct omap_dss_board_info *board_data = pdata->board_data;
+ int cnt;
+
+ if (!board_data->get_context_loss_count)
+ return -ENOENT;
+
+ cnt = board_data->get_context_loss_count(dev);
+
+ WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+ return cnt;
+}
+
#define SR(reg) \
dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)])
-void dispc_save_context(void)
+static void dispc_save_context(void)
{
int i;
- if (cpu_is_omap24xx())
- return;
- SR(SYSCONFIG);
+ DSSDBG("dispc_save_context\n");
+
SR(IRQENABLE);
SR(CONTROL);
SR(CONFIG);
@@ -158,7 +187,8 @@ void dispc_save_context(void)
SR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
SR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
- SR(GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ SR(GLOBAL_ALPHA);
SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -188,20 +218,25 @@ void dispc_save_context(void)
SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
- SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
- SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ }
SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
}
- SR(OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(OMAP_DSS_GFX));
/* VID1 */
SR(OVL_BA0(OMAP_DSS_VIDEO1));
@@ -226,8 +261,10 @@ void dispc_save_context(void)
for (i = 0; i < 5; i++)
SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
SR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -248,7 +285,8 @@ void dispc_save_context(void)
if (dss_has_feature(FEAT_ATTR2))
SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
- SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
/* VID2 */
SR(OVL_BA0(OMAP_DSS_VIDEO2));
@@ -273,8 +311,10 @@ void dispc_save_context(void)
for (i = 0; i < 5; i++)
SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
SR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -295,16 +335,35 @@ void dispc_save_context(void)
if (dss_has_feature(FEAT_ATTR2))
SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
- SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
if (dss_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
+
+ dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
+ dispc.ctx_valid = true;
+
+ DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
}
-void dispc_restore_context(void)
+static void dispc_restore_context(void)
{
- int i;
- RR(SYSCONFIG);
+ int i, ctx;
+
+ DSSDBG("dispc_restore_context\n");
+
+ if (!dispc.ctx_valid)
+ return;
+
+ ctx = dispc_get_ctx_loss_count();
+
+ if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
+ return;
+
+ DSSDBG("ctx_loss_count: saved %d, current %d\n",
+ dispc.ctx_loss_cnt, ctx);
+
/*RR(IRQENABLE);*/
/*RR(CONTROL);*/
RR(CONFIG);
@@ -317,7 +376,8 @@ void dispc_restore_context(void)
RR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
RR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
- RR(GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ RR(GLOBAL_ALPHA);
RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -347,20 +407,25 @@ void dispc_restore_context(void)
RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ }
}
- RR(OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(OMAP_DSS_GFX));
/* VID1 */
RR(OVL_BA0(OMAP_DSS_VIDEO1));
@@ -385,8 +450,10 @@ void dispc_restore_context(void)
for (i = 0; i < 5; i++)
RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
RR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -407,7 +474,8 @@ void dispc_restore_context(void)
if (dss_has_feature(FEAT_ATTR2))
RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
- RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
/* VID2 */
RR(OVL_BA0(OMAP_DSS_VIDEO2));
@@ -432,8 +500,10 @@ void dispc_restore_context(void)
for (i = 0; i < 5; i++)
RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
RR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -454,7 +524,8 @@ void dispc_restore_context(void)
if (dss_has_feature(FEAT_ATTR2))
RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
- RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
if (dss_has_feature(FEAT_CORE_CLK_DIV))
RR(DIVISOR);
@@ -471,19 +542,35 @@ void dispc_restore_context(void)
* the context is fully restored
*/
RR(IRQENABLE);
+
+ DSSDBG("context restored\n");
}
#undef SR
#undef RR
-static inline void enable_clocks(bool enable)
+int dispc_runtime_get(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+
+ DSSDBG("dispc_runtime_get\n");
+
+ r = pm_runtime_get_sync(&dispc.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
}
+void dispc_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("dispc_runtime_put\n");
+
+ r = pm_runtime_put(&dispc.pdev->dev);
+ WARN_ON(r < 0);
+}
+
+
bool dispc_go_busy(enum omap_channel channel)
{
int bit;
@@ -505,8 +592,6 @@ void dispc_go(enum omap_channel channel)
int bit;
bool enable_bit, go_bit;
- enable_clocks(1);
-
if (channel == OMAP_DSS_CHANNEL_LCD ||
channel == OMAP_DSS_CHANNEL_LCD2)
bit = 0; /* LCDENABLE */
@@ -520,7 +605,7 @@ void dispc_go(enum omap_channel channel)
enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
if (!enable_bit)
- goto end;
+ return;
if (channel == OMAP_DSS_CHANNEL_LCD ||
channel == OMAP_DSS_CHANNEL_LCD2)
@@ -535,7 +620,7 @@ void dispc_go(enum omap_channel channel)
if (go_bit) {
DSSERR("GO bit not down for channel %d\n", channel);
- goto end;
+ return;
}
DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" :
@@ -545,8 +630,6 @@ void dispc_go(enum omap_channel channel)
REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
else
REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
-end:
- enable_clocks(0);
}
static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
@@ -920,7 +1003,7 @@ static void _dispc_set_color_mode(enum omap_plane plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
-static void _dispc_set_channel_out(enum omap_plane plane,
+void dispc_set_channel_out(enum omap_plane plane,
enum omap_channel channel)
{
int shift;
@@ -967,13 +1050,10 @@ static void _dispc_set_channel_out(enum omap_plane plane,
dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
}
-void dispc_set_burst_size(enum omap_plane plane,
+static void dispc_set_burst_size(enum omap_plane plane,
enum omap_burst_size burst_size)
{
int shift;
- u32 val;
-
- enable_clocks(1);
switch (plane) {
case OMAP_DSS_GFX:
@@ -988,11 +1068,24 @@ void dispc_set_burst_size(enum omap_plane plane,
return;
}
- val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
- val = FLD_MOD(val, burst_size, shift+1, shift);
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift);
+}
- enable_clocks(0);
+static void dispc_configure_burst_sizes(void)
+{
+ int i;
+ const int burst_size = BURST_SIZE_X8;
+
+ /* Configure burst size always to maximum size */
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i)
+ dispc_set_burst_size(i, burst_size);
+}
+
+u32 dispc_get_burst_size(enum omap_plane plane)
+{
+ unsigned unit = dss_feat_get_burst_size_unit();
+ /* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
+ return unit * 8;
}
void dispc_enable_gamma_table(bool enable)
@@ -1009,6 +1102,40 @@ void dispc_enable_gamma_table(bool enable)
REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
}
+void dispc_enable_cpr(enum omap_channel channel, bool enable)
+{
+ u16 reg;
+
+ if (channel == OMAP_DSS_CHANNEL_LCD)
+ reg = DISPC_CONFIG;
+ else if (channel == OMAP_DSS_CHANNEL_LCD2)
+ reg = DISPC_CONFIG2;
+ else
+ return;
+
+ REG_FLD_MOD(reg, enable, 15, 15);
+}
+
+void dispc_set_cpr_coef(enum omap_channel channel,
+ struct omap_dss_cpr_coefs *coefs)
+{
+ u32 coef_r, coef_g, coef_b;
+
+ if (channel != OMAP_DSS_CHANNEL_LCD && channel != OMAP_DSS_CHANNEL_LCD2)
+ return;
+
+ coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
+ FLD_VAL(coefs->rb, 9, 0);
+ coef_g = FLD_VAL(coefs->gr, 31, 22) | FLD_VAL(coefs->gg, 20, 11) |
+ FLD_VAL(coefs->gb, 9, 0);
+ coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) |
+ FLD_VAL(coefs->bb, 9, 0);
+
+ dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r);
+ dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g);
+ dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b);
+}
+
static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
{
u32 val;
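
The two new CPR helpers are meant to be used together; a minimal sketch follows, assuming unity gain is 256 in the 10-bit fixed-point coefficient fields (that scaling is an assumption, not stated in this diff).

/* sketch only: program an identity colour-phase-rotation matrix */
static void sketch_cpr_identity(enum omap_channel channel)
{
	struct omap_dss_cpr_coefs coefs = {
		.rr = 256, .rg = 0,   .rb = 0,
		.gr = 0,   .gg = 256, .gb = 0,
		.br = 0,   .bg = 0,   .bb = 256,
	};

	dispc_enable_cpr(channel, true);
	dispc_set_cpr_coef(channel, &coefs);
}
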
@@ -1029,9 +1156,7 @@ void dispc_enable_replication(enum omap_plane plane, bool enable)
else
bit = 10;
- enable_clocks(1);
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
- enable_clocks(0);
}
void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
@@ -1039,9 +1164,7 @@ void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_SIZE_MGR(channel), val);
- enable_clocks(0);
}
void dispc_set_digit_size(u16 width, u16 height)
@@ -1049,9 +1172,7 @@ void dispc_set_digit_size(u16 width, u16 height)
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
- enable_clocks(0);
}
static void dispc_read_plane_fifo_sizes(void)
@@ -1059,18 +1180,17 @@ static void dispc_read_plane_fifo_sizes(void)
u32 size;
int plane;
u8 start, end;
+ u32 unit;
- enable_clocks(1);
+ unit = dss_feat_get_buffer_size_unit();
dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
- size = FLD_GET(dispc_read_reg(DISPC_OVL_FIFO_SIZE_STATUS(plane)),
- start, end);
+ size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(plane), start, end);
+ size *= unit;
dispc.fifo_size[plane] = size;
}
-
- enable_clocks(0);
}
u32 dispc_get_plane_fifo_size(enum omap_plane plane)
@@ -1078,15 +1198,22 @@ u32 dispc_get_plane_fifo_size(enum omap_plane plane)
return dispc.fifo_size[plane];
}
-void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
+void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
{
u8 hi_start, hi_end, lo_start, lo_end;
+ u32 unit;
+
+ unit = dss_feat_get_buffer_size_unit();
+
+ WARN_ON(low % unit != 0);
+ WARN_ON(high % unit != 0);
+
+ low /= unit;
+ high /= unit;
dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
- enable_clocks(1);
-
DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
plane,
REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
@@ -1098,18 +1225,12 @@ void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
FLD_VAL(high, hi_start, hi_end) |
FLD_VAL(low, lo_start, lo_end));
-
- enable_clocks(0);
}
void dispc_enable_fifomerge(bool enable)
{
- enable_clocks(1);
-
DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
-
- enable_clocks(0);
}
static void _dispc_set_fir(enum omap_plane plane,
@@ -1729,14 +1850,7 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
return dispc_pclk_rate(channel) * vf * hf;
}
-void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
-{
- enable_clocks(1);
- _dispc_set_channel_out(plane, channel_out);
- enable_clocks(0);
-}
-
-static int _dispc_setup_plane(enum omap_plane plane,
+int dispc_setup_plane(enum omap_plane plane,
u32 paddr, u16 screen_width,
u16 pos_x, u16 pos_y,
u16 width, u16 height,
@@ -1744,7 +1858,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
enum omap_color_mode color_mode,
bool ilace,
enum omap_dss_rotation_type rotation_type,
- u8 rotation, int mirror,
+ u8 rotation, bool mirror,
u8 global_alpha, u8 pre_mult_alpha,
enum omap_channel channel, u32 puv_addr)
{
@@ -1758,6 +1872,14 @@ static int _dispc_setup_plane(enum omap_plane plane,
u16 frame_height = height;
unsigned int field_offset = 0;
+ DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
+ "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
+ plane, paddr, screen_width, pos_x, pos_y,
+ width, height,
+ out_width, out_height,
+ ilace, color_mode,
+ rotation, mirror, channel);
+
if (paddr == 0)
return -EINVAL;
@@ -1903,9 +2025,13 @@ static int _dispc_setup_plane(enum omap_plane plane,
return 0;
}
-static void _dispc_enable_plane(enum omap_plane plane, bool enable)
+int dispc_enable_plane(enum omap_plane plane, bool enable)
{
+ DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
+
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
+
+ return 0;
}
static void dispc_disable_isr(void *data, u32 mask)
@@ -1929,8 +2055,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
int r;
u32 irq;
- enable_clocks(1);
-
/* When we disable LCD output, we need to wait until frame is done.
* Otherwise the DSS is still working, and turning off the clocks
* prevents DSS from going to OFF mode */
@@ -1964,8 +2088,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
if (r)
DSSERR("failed to unregister FRAMEDONE isr\n");
}
-
- enable_clocks(0);
}
static void _enable_digit_out(bool enable)
@@ -1978,12 +2100,8 @@ static void dispc_enable_digit_out(bool enable)
struct completion frame_done_completion;
int r;
- enable_clocks(1);
-
- if (REG_GET(DISPC_CONTROL, 1, 1) == enable) {
- enable_clocks(0);
+ if (REG_GET(DISPC_CONTROL, 1, 1) == enable)
return;
- }
if (enable) {
unsigned long flags;
@@ -2035,8 +2153,6 @@ static void dispc_enable_digit_out(bool enable)
_omap_dispc_set_irqs();
spin_unlock_irqrestore(&dispc.irq_lock, flags);
}
-
- enable_clocks(0);
}
bool dispc_is_channel_enabled(enum omap_channel channel)
@@ -2067,9 +2183,7 @@ void dispc_lcd_enable_signal_polarity(bool act_high)
if (!dss_has_feature(FEAT_LCDENABLEPOL))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
- enable_clocks(0);
}
void dispc_lcd_enable_signal(bool enable)
@@ -2077,9 +2191,7 @@ void dispc_lcd_enable_signal(bool enable)
if (!dss_has_feature(FEAT_LCDENABLESIGNAL))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
- enable_clocks(0);
}
void dispc_pck_free_enable(bool enable)
@@ -2087,19 +2199,15 @@ void dispc_pck_free_enable(bool enable)
if (!dss_has_feature(FEAT_PCKFREEENABLE))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
- enable_clocks(0);
}
void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16);
else
REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
- enable_clocks(0);
}
@@ -2122,27 +2230,21 @@ void dispc_set_lcd_display_type(enum omap_channel channel,
return;
}
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
else
REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
- enable_clocks(0);
}
void dispc_set_loadmode(enum omap_dss_load_mode mode)
{
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
- enable_clocks(0);
}
void dispc_set_default_color(enum omap_channel channel, u32 color)
{
- enable_clocks(1);
dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
- enable_clocks(0);
}
u32 dispc_get_default_color(enum omap_channel channel)
@@ -2153,9 +2255,7 @@ u32 dispc_get_default_color(enum omap_channel channel)
channel != OMAP_DSS_CHANNEL_LCD &&
channel != OMAP_DSS_CHANNEL_LCD2);
- enable_clocks(1);
l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel));
- enable_clocks(0);
return l;
}
@@ -2164,7 +2264,6 @@ void dispc_set_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type type,
u32 trans_key)
{
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2173,14 +2272,12 @@ void dispc_set_trans_key(enum omap_channel ch,
REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);
dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
- enable_clocks(0);
}
void dispc_get_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type *type,
u32 *trans_key)
{
- enable_clocks(1);
if (type) {
if (ch == OMAP_DSS_CHANNEL_LCD)
*type = REG_GET(DISPC_CONFIG, 11, 11);
@@ -2194,33 +2291,28 @@ void dispc_get_trans_key(enum omap_channel ch,
if (trans_key)
*trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
- enable_clocks(0);
}
void dispc_enable_trans_key(enum omap_channel ch, bool enable)
{
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
else /* OMAP_DSS_CHANNEL_LCD2 */
REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
- enable_clocks(0);
}
void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
{
if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
else /* OMAP_DSS_CHANNEL_LCD2 */
REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18);
- enable_clocks(0);
}
bool dispc_alpha_blending_enabled(enum omap_channel ch)
{
@@ -2229,7 +2321,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return false;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2238,7 +2329,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
enabled = REG_GET(DISPC_CONFIG2, 18, 18);
else
BUG();
- enable_clocks(0);
return enabled;
}
@@ -2248,7 +2338,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
{
bool enabled;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 10, 10);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2257,7 +2346,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
enabled = REG_GET(DISPC_CONFIG2, 10, 10);
else
BUG();
- enable_clocks(0);
return enabled;
}
@@ -2285,12 +2373,10 @@ void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
return;
}
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
else
REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
- enable_clocks(0);
}
void dispc_set_parallel_interface_mode(enum omap_channel channel,
@@ -2322,8 +2408,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel,
return;
}
- enable_clocks(1);
-
if (channel == OMAP_DSS_CHANNEL_LCD2) {
l = dispc_read_reg(DISPC_CONTROL2);
l = FLD_MOD(l, stallmode, 11, 11);
@@ -2335,8 +2419,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel,
l = FLD_MOD(l, gpout1, 16, 16);
dispc_write_reg(DISPC_CONTROL, l);
}
-
- enable_clocks(0);
}
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
@@ -2389,10 +2471,8 @@ static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw,
FLD_VAL(vbp, 31, 20);
}
- enable_clocks(1);
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
- enable_clocks(0);
}
/* change name to mode? */
@@ -2435,10 +2515,8 @@ static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
BUG_ON(lck_div < 1);
BUG_ON(pck_div < 2);
- enable_clocks(1);
dispc_write_reg(DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
- enable_clocks(0);
}
static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
@@ -2457,7 +2535,7 @@ unsigned long dispc_fclk_rate(void)
switch (dss_get_dispc_clk_source()) {
case OMAP_DSS_CLK_SRC_FCK:
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dispc.dss_clk);
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
dsidev = dsi_get_dsidev_from_id(0);
@@ -2487,7 +2565,7 @@ unsigned long dispc_lclk_rate(enum omap_channel channel)
switch (dss_get_lcd_clk_source(channel)) {
case OMAP_DSS_CLK_SRC_FCK:
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dispc.dss_clk);
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
dsidev = dsi_get_dsidev_from_id(0);
@@ -2526,7 +2604,8 @@ void dispc_dump_clocks(struct seq_file *s)
enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
enum omap_dss_clk_source lcd_clk_src;
- enable_clocks(1);
+ if (dispc_runtime_get())
+ return;
seq_printf(s, "- DISPC -\n");
@@ -2574,7 +2653,8 @@ void dispc_dump_clocks(struct seq_file *s)
seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
}
- enable_clocks(0);
+
+ dispc_runtime_put();
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -2629,7 +2709,8 @@ void dispc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dispc_runtime_get())
+ return;
DUMPREG(DISPC_REVISION);
DUMPREG(DISPC_SYSCONFIG);
@@ -2649,7 +2730,8 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ DUMPREG(DISPC_GLOBAL_ALPHA);
DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -2680,20 +2762,25 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ }
}
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1));
DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1));
@@ -2744,14 +2831,16 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -2812,14 +2901,17 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
+
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -2858,10 +2950,12 @@ void dispc_dump_regs(struct seq_file *s)
if (dss_has_feature(FEAT_ATTR2))
DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_PRELOAD)) {
+ DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
+ DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ }
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
#undef DUMPREG
}
@@ -2882,9 +2976,7 @@ static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf,
l |= FLD_VAL(acbi, 11, 8);
l |= FLD_VAL(acb, 7, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_POL_FREQ(channel), l);
- enable_clocks(0);
}
void dispc_set_pol_freq(enum omap_channel channel,
@@ -3005,15 +3097,11 @@ static void _omap_dispc_set_irqs(void)
mask |= isr_data->mask;
}
- enable_clocks(1);
-
old_mask = dispc_read_reg(DISPC_IRQENABLE);
/* clear the irqstatus for newly enabled irqs */
dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
dispc_write_reg(DISPC_IRQENABLE, mask);
-
- enable_clocks(0);
}
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
@@ -3522,13 +3610,6 @@ static void _omap_dispc_initial_config(void)
{
u32 l;
- l = dispc_read_reg(DISPC_SYSCONFIG);
- l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */
- l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */
- l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */
- l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
- dispc_write_reg(DISPC_SYSCONFIG, l);
-
/* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
l = dispc_read_reg(DISPC_DIVISOR);
@@ -3552,58 +3633,8 @@ static void _omap_dispc_initial_config(void)
dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
dispc_read_plane_fifo_sizes();
-}
-int dispc_enable_plane(enum omap_plane plane, bool enable)
-{
- DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
-
- enable_clocks(1);
- _dispc_enable_plane(plane, enable);
- enable_clocks(0);
-
- return 0;
-}
-
-int dispc_setup_plane(enum omap_plane plane,
- u32 paddr, u16 screen_width,
- u16 pos_x, u16 pos_y,
- u16 width, u16 height,
- u16 out_width, u16 out_height,
- enum omap_color_mode color_mode,
- bool ilace,
- enum omap_dss_rotation_type rotation_type,
- u8 rotation, bool mirror, u8 global_alpha,
- u8 pre_mult_alpha, enum omap_channel channel,
- u32 puv_addr)
-{
- int r = 0;
-
- DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d, %d, %dx%d -> "
- "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
- plane, paddr, screen_width, pos_x, pos_y,
- width, height,
- out_width, out_height,
- ilace, color_mode,
- rotation, mirror, channel);
-
- enable_clocks(1);
-
- r = _dispc_setup_plane(plane,
- paddr, screen_width,
- pos_x, pos_y,
- width, height,
- out_width, out_height,
- color_mode, ilace,
- rotation_type,
- rotation, mirror,
- global_alpha,
- pre_mult_alpha,
- channel, puv_addr);
-
- enable_clocks(0);
-
- return r;
+ dispc_configure_burst_sizes();
}
/* DISPC HW IP initialisation */
@@ -3612,9 +3643,19 @@ static int omap_dispchw_probe(struct platform_device *pdev)
u32 rev;
int r = 0;
struct resource *dispc_mem;
+ struct clk *clk;
dispc.pdev = pdev;
+ clk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get fck\n");
+ r = PTR_ERR(clk);
+ goto err_get_clk;
+ }
+
+ dispc.dss_clk = clk;
+
spin_lock_init(&dispc.irq_lock);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -3628,62 +3669,103 @@ static int omap_dispchw_probe(struct platform_device *pdev)
if (!dispc_mem) {
DSSERR("can't get IORESOURCE_MEM DISPC\n");
r = -EINVAL;
- goto fail0;
+ goto err_ioremap;
}
dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem));
if (!dispc.base) {
DSSERR("can't ioremap DISPC\n");
r = -ENOMEM;
- goto fail0;
+ goto err_ioremap;
}
dispc.irq = platform_get_irq(dispc.pdev, 0);
if (dispc.irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto fail1;
+ goto err_irq;
}
r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED,
"OMAP DISPC", dispc.pdev);
if (r < 0) {
DSSERR("request_irq failed\n");
- goto fail1;
+ goto err_irq;
}
- enable_clocks(1);
+ pm_runtime_enable(&pdev->dev);
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_runtime_get;
_omap_dispc_initial_config();
_omap_dispc_initialize_irq();
- dispc_save_context();
-
rev = dispc_read_reg(DISPC_REVISION);
dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- enable_clocks(0);
+ dispc_runtime_put();
return 0;
-fail1:
+
+err_runtime_get:
+ pm_runtime_disable(&pdev->dev);
+ free_irq(dispc.irq, dispc.pdev);
+err_irq:
iounmap(dispc.base);
-fail0:
+err_ioremap:
+ clk_put(dispc.dss_clk);
+err_get_clk:
return r;
}
static int omap_dispchw_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
+
+ clk_put(dispc.dss_clk);
+
free_irq(dispc.irq, dispc.pdev);
iounmap(dispc.base);
return 0;
}
+static int dispc_runtime_suspend(struct device *dev)
+{
+ dispc_save_context();
+ clk_disable(dispc.dss_clk);
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int dispc_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ return r;
+
+ clk_enable(dispc.dss_clk);
+ dispc_restore_context();
+
+ return 0;
+}
+
+static const struct dev_pm_ops dispc_pm_ops = {
+ .runtime_suspend = dispc_runtime_suspend,
+ .runtime_resume = dispc_runtime_resume,
+};
+
static struct platform_driver omap_dispchw_driver = {
.probe = omap_dispchw_probe,
.remove = omap_dispchw_remove,
.driver = {
.name = "omapdss_dispc",
.owner = THIS_MODULE,
+ .pm = &dispc_pm_ops,
},
};
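
The dispc.c hunks above replace the old open-coded clock bracketing with runtime PM: every path that touches DISPC registers now takes a reference first. A minimal sketch of the resulting caller pattern, written as if it lived in dispc.c (dispc_read_reg() and DISPC_REVISION are file-local there; example_read_dispc_rev is a hypothetical helper, not part of the patch):

static int example_read_dispc_rev(u32 *rev)
{
        int r;

        /* DISPC registers are only accessible while a runtime PM
         * reference is held */
        r = dispc_runtime_get();
        if (r)
                return r;

        *rev = dispc_read_reg(DISPC_REVISION);

        dispc_runtime_put();
        return 0;
}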
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index c2dfc8c5005..94495e45ec5 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -29,6 +29,7 @@
#include <video/omapdss.h>
#include "dss.h"
+#include "dss_features.h"
static ssize_t display_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -65,48 +66,6 @@ static ssize_t display_enabled_store(struct device *dev,
return size;
}
-static ssize_t display_upd_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO;
- if (dssdev->driver->get_update_mode)
- mode = dssdev->driver->get_update_mode(dssdev);
- return snprintf(buf, PAGE_SIZE, "%d\n", mode);
-}
-
-static ssize_t display_upd_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int val, r;
- enum omap_dss_update_mode mode;
-
- if (!dssdev->driver->set_update_mode)
- return -EINVAL;
-
- r = kstrtoint(buf, 0, &val);
- if (r)
- return r;
-
- switch (val) {
- case OMAP_DSS_UPDATE_DISABLED:
- case OMAP_DSS_UPDATE_AUTO:
- case OMAP_DSS_UPDATE_MANUAL:
- mode = (enum omap_dss_update_mode)val;
- break;
- default:
- return -EINVAL;
- }
-
- r = dssdev->driver->set_update_mode(dssdev, mode);
- if (r)
- return r;
-
- return size;
-}
-
static ssize_t display_tear_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -294,8 +253,6 @@ static ssize_t display_wss_store(struct device *dev,
static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
display_enabled_show, display_enabled_store);
-static DEVICE_ATTR(update_mode, S_IRUGO|S_IWUSR,
- display_upd_mode_show, display_upd_mode_store);
static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
display_tear_show, display_tear_store);
static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
@@ -309,7 +266,6 @@ static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
static struct device_attribute *display_sysfs_attrs[] = {
&dev_attr_enabled,
- &dev_attr_update_mode,
&dev_attr_tear_elim,
&dev_attr_timings,
&dev_attr_rotate,
@@ -327,16 +283,13 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
EXPORT_SYMBOL(omapdss_default_get_resolution);
void default_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high)
{
- unsigned burst_size_bytes;
-
- *burst_size = OMAP_DSS_BURST_16x32;
- burst_size_bytes = 16 * 32 / 8;
+ unsigned buf_unit = dss_feat_get_buffer_size_unit();
- *fifo_high = fifo_size - 1;
- *fifo_low = fifo_size - burst_size_bytes;
+ *fifo_high = fifo_size - buf_unit;
+ *fifo_low = fifo_size - burst_size;
}
int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index ff6bd30132d..f053b180ecd 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -23,7 +23,6 @@
#define DSS_SUBSYS_NAME "DPI"
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -130,8 +129,6 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
bool is_tft;
int r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
-
dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
@@ -144,7 +141,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000,
&fck, &lck_div, &pck_div);
if (r)
- goto err0;
+ return r;
pck = fck / lck_div / pck_div / 1000;
@@ -158,12 +155,10 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
dispc_set_lcd_timings(dssdev->manager->id, t);
-err0:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
- return r;
+ return 0;
}
-static int dpi_basic_init(struct omap_dss_device *dssdev)
+static void dpi_basic_init(struct omap_dss_device *dssdev)
{
bool is_tft;
@@ -175,8 +170,6 @@ static int dpi_basic_init(struct omap_dss_device *dssdev)
OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
dispc_set_tft_data_lines(dssdev->manager->id,
dssdev->phy.dpi.data_lines);
-
- return 0;
}
int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
@@ -186,31 +179,38 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
if (cpu_is_omap34xx()) {
r = regulator_enable(dpi.vdds_dsi_reg);
if (r)
- goto err1;
+ goto err_reg_enable;
}
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
- r = dpi_basic_init(dssdev);
+ r = dispc_runtime_get();
if (r)
- goto err2;
+ goto err_get_dispc;
+
+ dpi_basic_init(dssdev);
if (dpi_use_dsi_pll(dssdev)) {
- dss_clk_enable(DSS_CLK_SYSCK);
+ r = dsi_runtime_get(dpi.dsidev);
+ if (r)
+ goto err_get_dsi;
+
r = dsi_pll_init(dpi.dsidev, 0, 1);
if (r)
- goto err3;
+ goto err_dsi_pll_init;
}
r = dpi_set_mode(dssdev);
if (r)
- goto err4;
+ goto err_set_mode;
mdelay(2);
@@ -218,19 +218,22 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
return 0;
-err4:
+err_set_mode:
if (dpi_use_dsi_pll(dssdev))
dsi_pll_uninit(dpi.dsidev, true);
-err3:
+err_dsi_pll_init:
if (dpi_use_dsi_pll(dssdev))
- dss_clk_disable(DSS_CLK_SYSCK);
-err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dsi_runtime_put(dpi.dsidev);
+err_get_dsi:
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
-err1:
+err_reg_enable:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
return r;
}
EXPORT_SYMBOL(omapdss_dpi_display_enable);
@@ -242,10 +245,11 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
if (dpi_use_dsi_pll(dssdev)) {
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
dsi_pll_uninit(dpi.dsidev, true);
- dss_clk_disable(DSS_CLK_SYSCK);
+ dsi_runtime_put(dpi.dsidev);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
+ dss_runtime_put();
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
@@ -257,11 +261,26 @@ EXPORT_SYMBOL(omapdss_dpi_display_disable);
void dpi_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
+ int r;
+
DSSDBG("dpi_set_timings\n");
dssdev->panel.timings = *timings;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ r = dss_runtime_get();
+ if (r)
+ return;
+
+ r = dispc_runtime_get();
+ if (r) {
+ dss_runtime_put();
+ return;
+ }
+
dpi_set_mode(dssdev);
dispc_go(dssdev->manager->id);
+
+ dispc_runtime_put();
+ dss_runtime_put();
}
}
EXPORT_SYMBOL(dpi_set_timings);
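
dpi_set_timings() above illustrates the ordering rule the patch follows whenever two IP blocks are needed: acquire DSS before DISPC, release in the reverse order, and unwind on failure. A condensed sketch of that rule, assuming it sits inside the omapdss driver where dss.h is visible (the callback parameter is purely illustrative):

static int example_with_dss_and_dispc(void (*touch_registers)(void))
{
        int r;

        r = dss_runtime_get();
        if (r)
                return r;

        r = dispc_runtime_get();
        if (r) {
                dss_runtime_put();      /* unwind the DSS reference */
                return r;
        }

        touch_registers();

        dispc_runtime_put();
        dss_runtime_put();
        return 0;
}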
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 345757cfcbe..7adbbeb8433 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -36,6 +36,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/clock.h>
@@ -267,8 +268,12 @@ struct dsi_isr_tables {
struct dsi_data {
struct platform_device *pdev;
void __iomem *base;
+
int irq;
+ struct clk *dss_clk;
+ struct clk *sys_clk;
+
void (*dsi_mux_pads)(bool enable);
struct dsi_clock_info current_cinfo;
@@ -389,15 +394,6 @@ static inline u32 dsi_read_reg(struct platform_device *dsidev,
return __raw_readl(dsi->base + idx.idx);
}
-
-void dsi_save_context(void)
-{
-}
-
-void dsi_restore_context(void)
-{
-}
-
void dsi_bus_lock(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -493,9 +489,18 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
total_bytes * 1000 / total_us);
}
#else
-#define dsi_perf_mark_setup(x)
-#define dsi_perf_mark_start(x)
-#define dsi_perf_show(x, y)
+static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
+{
+}
+
+static inline void dsi_perf_mark_start(struct platform_device *dsidev)
+{
+}
+
+static inline void dsi_perf_show(struct platform_device *dsidev,
+ const char *name)
+{
+}
#endif
static void print_irq_status(u32 status)
@@ -1039,13 +1044,27 @@ static u32 dsi_get_errors(struct platform_device *dsidev)
return e;
}
-/* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */
-static inline void enable_clocks(bool enable)
+int dsi_runtime_get(struct platform_device *dsidev)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+
+ DSSDBG("dsi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&dsi->pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+void dsi_runtime_put(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ int r;
+
+ DSSDBG("dsi_runtime_put\n");
+
+ r = pm_runtime_put(&dsi->pdev->dev);
+ WARN_ON(r < 0);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
@@ -1055,9 +1074,9 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (enable)
- dss_clk_enable(DSS_CLK_SYSCK);
+ clk_enable(dsi->sys_clk);
else
- dss_clk_disable(DSS_CLK_SYSCK);
+ clk_disable(dsi->sys_clk);
if (enable && dsi->pll_locked) {
if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
@@ -1150,10 +1169,11 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
{
unsigned long r;
int dsi_module = dsi_get_dsidev_id(dsidev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dsi->dss_clk);
} else {
/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
@@ -1262,7 +1282,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
return -EINVAL;
if (cinfo->use_sys_clk) {
- cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK);
+ cinfo->clkin = clk_get_rate(dsi->sys_clk);
/* XXX it is unclear if highfreq should be used
* with DSS_SYS_CLK source also */
cinfo->highfreq = 0;
@@ -1311,7 +1331,7 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
int match = 0;
unsigned long dss_sys_clk, max_dss_fck;
- dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK);
+ dss_sys_clk = clk_get_rate(dsi->sys_clk);
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
@@ -1601,7 +1621,6 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
dsi->vdds_dsi_reg = vdds_dsi;
}
- enable_clocks(1);
dsi_enable_pll_clock(dsidev, 1);
/*
* Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
@@ -1653,7 +1672,6 @@ err1:
}
err0:
dsi_disable_scp_clk(dsidev);
- enable_clocks(0);
dsi_enable_pll_clock(dsidev, 0);
return r;
}
@@ -1671,7 +1689,6 @@ void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
}
dsi_disable_scp_clk(dsidev);
- enable_clocks(0);
dsi_enable_pll_clock(dsidev, 0);
DSSDBG("PLL uninit done\n");
@@ -1688,7 +1705,8 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
dispc_clk_src = dss_get_dispc_clk_source();
dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
- enable_clocks(1);
+ if (dsi_runtime_get(dsidev))
+ return;
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
@@ -1731,7 +1749,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
}
void dsi_dump_clocks(struct seq_file *s)
@@ -1873,7 +1891,8 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dsi_runtime_get(dsidev))
+ return;
dsi_enable_scp_clk(dsidev);
DUMPREG(DSI_REVISION);
@@ -1947,7 +1966,7 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
DUMPREG(DSI_PLL_CONFIGURATION2);
dsi_disable_scp_clk(dsidev);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dsi_runtime_put(dsidev);
#undef DUMPREG
}
@@ -2463,28 +2482,6 @@ static void dsi_cio_uninit(struct platform_device *dsidev)
dsi->dsi_mux_pads(false);
}
-static int _dsi_wait_reset(struct platform_device *dsidev)
-{
- int t = 0;
-
- while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) {
- if (++t > 5) {
- DSSERR("soft reset failed\n");
- return -ENODEV;
- }
- udelay(1);
- }
-
- return 0;
-}
-
-static int _dsi_reset(struct platform_device *dsidev)
-{
- /* Soft reset */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1);
- return _dsi_wait_reset(dsidev);
-}
-
static void dsi_config_tx_fifo(struct platform_device *dsidev,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
@@ -3386,6 +3383,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
+ /* Reset LANEx_ULPS_SIG2 */
+ REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2),
+ 7, 5);
+
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
dsi_if_enable(dsidev, false);
@@ -4198,22 +4199,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
dsi_pll_uninit(dsidev, disconnect_lanes);
}
-static int dsi_core_init(struct platform_device *dsidev)
-{
- /* Autoidle */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0);
-
- /* ENWAKEUP */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2);
-
- /* SIDLEMODE smart-idle */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3);
-
- _dsi_initialize_irq(dsidev);
-
- return 0;
-}
-
int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -4229,37 +4214,37 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
- enable_clocks(1);
- dsi_enable_pll_clock(dsidev, 1);
-
- r = _dsi_reset(dsidev);
+ r = dsi_runtime_get(dsidev);
if (r)
- goto err1;
+ goto err_get_dsi;
- dsi_core_init(dsidev);
+ dsi_enable_pll_clock(dsidev, 1);
+
+ _dsi_initialize_irq(dsidev);
r = dsi_display_init_dispc(dssdev);
if (r)
- goto err1;
+ goto err_init_dispc;
r = dsi_display_init_dsi(dssdev);
if (r)
- goto err2;
+ goto err_init_dsi;
mutex_unlock(&dsi->lock);
return 0;
-err2:
+err_init_dsi:
dsi_display_uninit_dispc(dssdev);
-err1:
- enable_clocks(0);
+err_init_dispc:
dsi_enable_pll_clock(dsidev, 0);
+ dsi_runtime_put(dsidev);
+err_get_dsi:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
return r;
@@ -4278,11 +4263,16 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
mutex_lock(&dsi->lock);
+ dsi_sync_vc(dsidev, 0);
+ dsi_sync_vc(dsidev, 1);
+ dsi_sync_vc(dsidev, 2);
+ dsi_sync_vc(dsidev, 3);
+
dsi_display_uninit_dispc(dssdev);
dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
dsi_enable_pll_clock(dsidev, 0);
omap_dss_stop_device(dssdev);
@@ -4302,16 +4292,11 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
EXPORT_SYMBOL(omapdss_dsi_enable_te);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high)
{
- unsigned burst_size_bytes;
-
- *burst_size = OMAP_DSS_BURST_16x32;
- burst_size_bytes = 16 * 32 / 8;
-
- *fifo_high = fifo_size - burst_size_bytes;
- *fifo_low = fifo_size - burst_size_bytes * 2;
+ *fifo_high = fifo_size - burst_size;
+ *fifo_low = fifo_size - burst_size * 2;
}
int dsi_init_display(struct omap_dss_device *dssdev)
@@ -4437,7 +4422,47 @@ static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
}
-static int dsi_init(struct platform_device *dsidev)
+static int dsi_get_clocks(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct clk *clk;
+
+ clk = clk_get(&dsidev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get fck\n");
+ return PTR_ERR(clk);
+ }
+
+ dsi->dss_clk = clk;
+
+ if (cpu_is_omap34xx() || cpu_is_omap3630())
+ clk = clk_get(&dsidev->dev, "dss2_alwon_fck");
+ else
+ clk = clk_get(&dsidev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ clk_put(dsi->dss_clk);
+ dsi->dss_clk = NULL;
+ return PTR_ERR(clk);
+ }
+
+ dsi->sys_clk = clk;
+
+ return 0;
+}
+
+static void dsi_put_clocks(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+
+ if (dsi->dss_clk)
+ clk_put(dsi->dss_clk);
+ if (dsi->sys_clk)
+ clk_put(dsi->sys_clk);
+}
+
+/* DSI1 HW IP initialisation */
+static int omap_dsi1hw_probe(struct platform_device *dsidev)
{
struct omap_display_platform_data *dss_plat_data;
struct omap_dss_board_info *board_info;
@@ -4449,7 +4474,7 @@ static int dsi_init(struct platform_device *dsidev)
dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi) {
r = -ENOMEM;
- goto err0;
+ goto err_alloc;
}
dsi->pdev = dsidev;
@@ -4472,6 +4497,12 @@ static int dsi_init(struct platform_device *dsidev)
mutex_init(&dsi->lock);
sema_init(&dsi->bus_lock, 1);
+ r = dsi_get_clocks(dsidev);
+ if (r)
+ goto err_get_clk;
+
+ pm_runtime_enable(&dsidev->dev);
+
INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
dsi_framedone_timeout_work_callback);
@@ -4484,26 +4515,26 @@ static int dsi_init(struct platform_device *dsidev)
if (!dsi_mem) {
DSSERR("can't get IORESOURCE_MEM DSI\n");
r = -EINVAL;
- goto err1;
+ goto err_ioremap;
}
dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
if (!dsi->base) {
DSSERR("can't ioremap DSI\n");
r = -ENOMEM;
- goto err1;
+ goto err_ioremap;
}
dsi->irq = platform_get_irq(dsi->pdev, 0);
if (dsi->irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err2;
+ goto err_get_irq;
}
r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
dev_name(&dsidev->dev), dsi->pdev);
if (r < 0) {
DSSERR("request_irq failed\n");
- goto err2;
+ goto err_get_irq;
}
/* DSI VCs initialization */
@@ -4515,7 +4546,9 @@ static int dsi_init(struct platform_device *dsidev)
dsi_calc_clock_param_ranges(dsidev);
- enable_clocks(1);
+ r = dsi_runtime_get(dsidev);
+ if (r)
+ goto err_get_dsi;
rev = dsi_read_reg(dsidev, DSI_REVISION);
dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
@@ -4523,21 +4556,32 @@ static int dsi_init(struct platform_device *dsidev)
dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
return 0;
-err2:
+
+err_get_dsi:
+ free_irq(dsi->irq, dsi->pdev);
+err_get_irq:
iounmap(dsi->base);
-err1:
+err_ioremap:
+ pm_runtime_disable(&dsidev->dev);
+err_get_clk:
kfree(dsi);
-err0:
+err_alloc:
return r;
}
-static void dsi_exit(struct platform_device *dsidev)
+static int omap_dsi1hw_remove(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ WARN_ON(dsi->scp_clk_refcount > 0);
+
+ pm_runtime_disable(&dsidev->dev);
+
+ dsi_put_clocks(dsidev);
+
if (dsi->vdds_dsi_reg != NULL) {
if (dsi->vdds_dsi_enabled) {
regulator_disable(dsi->vdds_dsi_reg);
@@ -4553,38 +4597,56 @@ static void dsi_exit(struct platform_device *dsidev)
kfree(dsi);
- DSSDBG("omap_dsi_exit\n");
+ return 0;
}
-/* DSI1 HW IP initialisation */
-static int omap_dsi1hw_probe(struct platform_device *dsidev)
+static int dsi_runtime_suspend(struct device *dev)
{
- int r;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev));
- r = dsi_init(dsidev);
- if (r) {
- DSSERR("Failed to initialize DSI\n");
- goto err_dsi;
- }
-err_dsi:
- return r;
+ clk_disable(dsi->dss_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
}
-static int omap_dsi1hw_remove(struct platform_device *dsidev)
+static int dsi_runtime_resume(struct device *dev)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev));
+ int r;
+
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
+
+ clk_enable(dsi->dss_clk);
- dsi_exit(dsidev);
- WARN_ON(dsi->scp_clk_refcount > 0);
return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
}
+static const struct dev_pm_ops dsi_pm_ops = {
+ .runtime_suspend = dsi_runtime_suspend,
+ .runtime_resume = dsi_runtime_resume,
+};
+
static struct platform_driver omap_dsi1hw_driver = {
.probe = omap_dsi1hw_probe,
.remove = omap_dsi1hw_remove,
.driver = {
.name = "omapdss_dsi1",
.owner = THIS_MODULE,
+ .pm = &dsi_pm_ops,
},
};
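
dsi_runtime_get()/dsi_runtime_put() above, like their dss, dispc and hdmi counterparts, are thin wrappers over the runtime PM core. A generic sketch of the shape they all share (foo/foo_dev are placeholder names, not taken from the patch):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_get(struct device *foo_dev)
{
        int r = pm_runtime_get_sync(foo_dev);

        WARN_ON(r < 0);
        /* pm_runtime_get_sync() returns 1 if the device was already
         * active; callers here only care about failure */
        return r < 0 ? r : 0;
}

static void foo_runtime_put(struct device *foo_dev)
{
        int r = pm_runtime_put(foo_dev);

        WARN_ON(r < 0);
}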
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index d9489d5c4f0..0f9c3a6457a 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -28,6 +28,8 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/clock.h>
@@ -59,15 +61,9 @@ struct dss_reg {
static struct {
struct platform_device *pdev;
void __iomem *base;
- int ctx_id;
struct clk *dpll4_m4_ck;
- struct clk *dss_ick;
- struct clk *dss_fck;
- struct clk *dss_sys_clk;
- struct clk *dss_tv_fck;
- struct clk *dss_video_fck;
- unsigned num_clks_enabled;
+ struct clk *dss_clk;
unsigned long cache_req_pck;
unsigned long cache_prate;
@@ -78,6 +74,7 @@ static struct {
enum omap_dss_clk_source dispc_clk_source;
enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
+ bool ctx_valid;
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;
@@ -87,13 +84,6 @@ static const char * const dss_generic_clk_source_names[] = {
[OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
};
-static void dss_clk_enable_all_no_ctx(void);
-static void dss_clk_disable_all_no_ctx(void);
-static void dss_clk_enable_no_ctx(enum dss_clock clks);
-static void dss_clk_disable_no_ctx(enum dss_clock clks);
-
-static int _omap_dss_wait_reset(void);
-
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
{
__raw_writel(val, dss.base + idx.idx);
@@ -109,12 +99,10 @@ static inline u32 dss_read_reg(const struct dss_reg idx)
#define RR(reg) \
dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
-void dss_save_context(void)
+static void dss_save_context(void)
{
- if (cpu_is_omap24xx())
- return;
+ DSSDBG("dss_save_context\n");
- SR(SYSCONFIG);
SR(CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -122,14 +110,19 @@ void dss_save_context(void)
SR(SDI_CONTROL);
SR(PLL_CONTROL);
}
+
+ dss.ctx_valid = true;
+
+ DSSDBG("context saved\n");
}
-void dss_restore_context(void)
+static void dss_restore_context(void)
{
- if (_omap_dss_wait_reset())
- DSSERR("DSS not coming out of reset after sleep\n");
+ DSSDBG("dss_restore_context\n");
+
+ if (!dss.ctx_valid)
+ return;
- RR(SYSCONFIG);
RR(CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -137,6 +130,8 @@ void dss_restore_context(void)
RR(SDI_CONTROL);
RR(PLL_CONTROL);
}
+
+ DSSDBG("context restored\n");
}
#undef SR
@@ -234,6 +229,7 @@ const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
return dss_generic_clk_source_names[clk_src];
}
+
void dss_dump_clocks(struct seq_file *s)
{
unsigned long dpll4_ck_rate;
@@ -241,13 +237,14 @@ void dss_dump_clocks(struct seq_file *s)
const char *fclk_name, *fclk_real_name;
unsigned long fclk_rate;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dss_runtime_get())
+ return;
seq_printf(s, "- DSS -\n");
fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
- fclk_rate = dss_clk_get_rate(DSS_CLK_FCK);
+ fclk_rate = clk_get_rate(dss.dss_clk);
if (dss.dpll4_m4_ck) {
dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
@@ -273,14 +270,15 @@ void dss_dump_clocks(struct seq_file *s)
fclk_rate);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dss_runtime_put();
}
void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dss_runtime_get())
+ return;
DUMPREG(DSS_REVISION);
DUMPREG(DSS_SYSCONFIG);
@@ -294,7 +292,7 @@ void dss_dump_regs(struct seq_file *s)
DUMPREG(DSS_SDI_STATUS);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dss_runtime_put();
#undef DUMPREG
}
@@ -437,7 +435,7 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo)
} else {
if (cinfo->fck_div != 0)
return -EINVAL;
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
+ cinfo->fck = clk_get_rate(dss.dss_clk);
}
return 0;
@@ -467,7 +465,7 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
int dss_get_clock_div(struct dss_clock_info *cinfo)
{
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
+ cinfo->fck = clk_get_rate(dss.dss_clk);
if (dss.dpll4_m4_ck) {
unsigned long prate;
@@ -512,7 +510,7 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
- fck = dss_clk_get_rate(DSS_CLK_FCK);
+ fck = clk_get_rate(dss.dss_clk);
if (req_pck == dss.cache_req_pck &&
((cpu_is_omap34xx() && prate == dss.cache_prate) ||
dss.cache_dss_cinfo.fck == fck)) {
@@ -539,7 +537,7 @@ retry:
if (dss.dpll4_m4_ck == NULL) {
struct dispc_clock_info cur_dispc;
/* XXX can we change the clock on omap2? */
- fck = dss_clk_get_rate(DSS_CLK_FCK);
+ fck = clk_get_rate(dss.dss_clk);
fck_div = 1;
dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
@@ -616,28 +614,6 @@ found:
return 0;
}
-static int _omap_dss_wait_reset(void)
-{
- int t = 0;
-
- while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
- if (++t > 1000) {
- DSSERR("soft reset failed\n");
- return -ENODEV;
- }
- udelay(1);
- }
-
- return 0;
-}
-
-static int _omap_dss_reset(void)
-{
- /* Soft reset */
- REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
- return _omap_dss_wait_reset();
-}
-
void dss_set_venc_output(enum omap_dss_venc_type type)
{
int l = 0;
@@ -663,424 +639,88 @@ void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi)
REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */
}
-static int dss_init(void)
+static int dss_get_clocks(void)
{
+ struct clk *clk;
int r;
- u32 rev;
- struct resource *dss_mem;
- struct clk *dpll4_m4_ck;
- dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
- if (!dss_mem) {
- DSSERR("can't get IORESOURCE_MEM DSS\n");
- r = -EINVAL;
- goto fail0;
- }
- dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
- if (!dss.base) {
- DSSERR("can't ioremap DSS\n");
- r = -ENOMEM;
- goto fail0;
+ clk = clk_get(&dss.pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get clock fck\n");
+ r = PTR_ERR(clk);
+ goto err;
}
- /* disable LCD and DIGIT output. This seems to fix the synclost
- * problem that we get, if the bootloader starts the DSS and
- * the kernel resets it */
- omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
-
-#ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET
- /* We need to wait here a bit, otherwise we sometimes start to
- * get synclost errors, and after that only power cycle will
- * restore DSS functionality. I have no idea why this happens.
- * And we have to wait _before_ resetting the DSS, but after
- * enabling clocks.
- *
- * This bug was at least present on OMAP3430. It's unknown
- * if it happens on OMAP2 or OMAP3630.
- */
- msleep(50);
-#endif
-
- _omap_dss_reset();
+ dss.dss_clk = clk;
- /* autoidle */
- REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
-
- /* Select DPLL */
- REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
-
-#ifdef CONFIG_OMAP2_DSS_VENC
- REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
- REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
- REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
-#endif
if (cpu_is_omap34xx()) {
- dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
- if (IS_ERR(dpll4_m4_ck)) {
+ clk = clk_get(NULL, "dpll4_m4_ck");
+ if (IS_ERR(clk)) {
DSSERR("Failed to get dpll4_m4_ck\n");
- r = PTR_ERR(dpll4_m4_ck);
- goto fail1;
+ r = PTR_ERR(clk);
+ goto err;
}
} else if (cpu_is_omap44xx()) {
- dpll4_m4_ck = clk_get(NULL, "dpll_per_m5x2_ck");
- if (IS_ERR(dpll4_m4_ck)) {
- DSSERR("Failed to get dpll4_m4_ck\n");
- r = PTR_ERR(dpll4_m4_ck);
- goto fail1;
+ clk = clk_get(NULL, "dpll_per_m5x2_ck");
+ if (IS_ERR(clk)) {
+ DSSERR("Failed to get dpll_per_m5x2_ck\n");
+ r = PTR_ERR(clk);
+ goto err;
}
} else { /* omap24xx */
- dpll4_m4_ck = NULL;
+ clk = NULL;
}
- dss.dpll4_m4_ck = dpll4_m4_ck;
-
- dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
- dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
- dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
-
- dss_save_context();
-
- rev = dss_read_reg(DSS_REVISION);
- printk(KERN_INFO "OMAP DSS rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+ dss.dpll4_m4_ck = clk;
return 0;
-fail1:
- iounmap(dss.base);
-fail0:
- return r;
-}
-
-static void dss_exit(void)
-{
+err:
+ if (dss.dss_clk)
+ clk_put(dss.dss_clk);
if (dss.dpll4_m4_ck)
clk_put(dss.dpll4_m4_ck);
- iounmap(dss.base);
-}
-
-/* CONTEXT */
-static int dss_get_ctx_id(void)
-{
- struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
- int r;
-
- if (!pdata->board_data->get_last_off_on_transaction_id)
- return 0;
- r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev);
- if (r < 0) {
- dev_err(&dss.pdev->dev, "getting transaction ID failed, "
- "will force context restore\n");
- r = -1;
- }
- return r;
-}
-
-int dss_need_ctx_restore(void)
-{
- int id = dss_get_ctx_id();
-
- if (id < 0 || id != dss.ctx_id) {
- DSSDBG("ctx id %d -> id %d\n",
- dss.ctx_id, id);
- dss.ctx_id = id;
- return 1;
- } else {
- return 0;
- }
-}
-
-static void save_all_ctx(void)
-{
- DSSDBG("save context\n");
-
- dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
-
- dss_save_context();
- dispc_save_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_save_context();
-#endif
-
- dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
-}
-
-static void restore_all_ctx(void)
-{
- DSSDBG("restore context\n");
-
- dss_clk_enable_all_no_ctx();
-
- dss_restore_context();
- dispc_restore_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_restore_context();
-#endif
-
- dss_clk_disable_all_no_ctx();
-}
-
-static int dss_get_clock(struct clk **clock, const char *clk_name)
-{
- struct clk *clk;
-
- clk = clk_get(&dss.pdev->dev, clk_name);
-
- if (IS_ERR(clk)) {
- DSSERR("can't get clock %s", clk_name);
- return PTR_ERR(clk);
- }
-
- *clock = clk;
-
- DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
-
- return 0;
-}
-
-static int dss_get_clocks(void)
-{
- int r;
- struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
-
- dss.dss_ick = NULL;
- dss.dss_fck = NULL;
- dss.dss_sys_clk = NULL;
- dss.dss_tv_fck = NULL;
- dss.dss_video_fck = NULL;
-
- r = dss_get_clock(&dss.dss_ick, "ick");
- if (r)
- goto err;
-
- r = dss_get_clock(&dss.dss_fck, "fck");
- if (r)
- goto err;
-
- if (!pdata->opt_clock_available) {
- r = -ENODEV;
- goto err;
- }
-
- if (pdata->opt_clock_available("sys_clk")) {
- r = dss_get_clock(&dss.dss_sys_clk, "sys_clk");
- if (r)
- goto err;
- }
-
- if (pdata->opt_clock_available("tv_clk")) {
- r = dss_get_clock(&dss.dss_tv_fck, "tv_clk");
- if (r)
- goto err;
- }
-
- if (pdata->opt_clock_available("video_clk")) {
- r = dss_get_clock(&dss.dss_video_fck, "video_clk");
- if (r)
- goto err;
- }
-
- return 0;
-
-err:
- if (dss.dss_ick)
- clk_put(dss.dss_ick);
- if (dss.dss_fck)
- clk_put(dss.dss_fck);
- if (dss.dss_sys_clk)
- clk_put(dss.dss_sys_clk);
- if (dss.dss_tv_fck)
- clk_put(dss.dss_tv_fck);
- if (dss.dss_video_fck)
- clk_put(dss.dss_video_fck);
-
return r;
}
static void dss_put_clocks(void)
{
- if (dss.dss_video_fck)
- clk_put(dss.dss_video_fck);
- if (dss.dss_tv_fck)
- clk_put(dss.dss_tv_fck);
- if (dss.dss_sys_clk)
- clk_put(dss.dss_sys_clk);
- clk_put(dss.dss_fck);
- clk_put(dss.dss_ick);
-}
-
-unsigned long dss_clk_get_rate(enum dss_clock clk)
-{
- switch (clk) {
- case DSS_CLK_ICK:
- return clk_get_rate(dss.dss_ick);
- case DSS_CLK_FCK:
- return clk_get_rate(dss.dss_fck);
- case DSS_CLK_SYSCK:
- return clk_get_rate(dss.dss_sys_clk);
- case DSS_CLK_TVFCK:
- return clk_get_rate(dss.dss_tv_fck);
- case DSS_CLK_VIDFCK:
- return clk_get_rate(dss.dss_video_fck);
- }
-
- BUG();
- return 0;
-}
-
-static unsigned count_clk_bits(enum dss_clock clks)
-{
- unsigned num_clks = 0;
-
- if (clks & DSS_CLK_ICK)
- ++num_clks;
- if (clks & DSS_CLK_FCK)
- ++num_clks;
- if (clks & DSS_CLK_SYSCK)
- ++num_clks;
- if (clks & DSS_CLK_TVFCK)
- ++num_clks;
- if (clks & DSS_CLK_VIDFCK)
- ++num_clks;
-
- return num_clks;
-}
-
-static void dss_clk_enable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_enable(dss.dss_ick);
- if (clks & DSS_CLK_FCK)
- clk_enable(dss.dss_fck);
- if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
- clk_enable(dss.dss_sys_clk);
- if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
- clk_enable(dss.dss_tv_fck);
- if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
- clk_enable(dss.dss_video_fck);
-
- dss.num_clks_enabled += num_clks;
-}
-
-void dss_clk_enable(enum dss_clock clks)
-{
- bool check_ctx = dss.num_clks_enabled == 0;
-
- dss_clk_enable_no_ctx(clks);
-
- /*
- * HACK: On omap4 the registers may not be accessible right after
- * enabling the clocks. At some point this will be handled by
- * pm_runtime, but for the time begin this should make things work.
- */
- if (cpu_is_omap44xx() && check_ctx)
- udelay(10);
-
- if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
- restore_all_ctx();
+ if (dss.dpll4_m4_ck)
+ clk_put(dss.dpll4_m4_ck);
+ clk_put(dss.dss_clk);
}
-static void dss_clk_disable_no_ctx(enum dss_clock clks)
+struct clk *dss_get_ick(void)
{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_disable(dss.dss_ick);
- if (clks & DSS_CLK_FCK)
- clk_disable(dss.dss_fck);
- if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
- clk_disable(dss.dss_sys_clk);
- if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
- clk_disable(dss.dss_tv_fck);
- if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
- clk_disable(dss.dss_video_fck);
-
- dss.num_clks_enabled -= num_clks;
+ return clk_get(&dss.pdev->dev, "ick");
}
-void dss_clk_disable(enum dss_clock clks)
+int dss_runtime_get(void)
{
- if (cpu_is_omap34xx()) {
- unsigned num_clks = count_clk_bits(clks);
-
- BUG_ON(dss.num_clks_enabled < num_clks);
+ int r;
- if (dss.num_clks_enabled == num_clks)
- save_all_ctx();
- }
+ DSSDBG("dss_runtime_get\n");
- dss_clk_disable_no_ctx(clks);
+ r = pm_runtime_get_sync(&dss.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
}
-static void dss_clk_enable_all_no_ctx(void)
+void dss_runtime_put(void)
{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_VIDFCK;
- dss_clk_enable_no_ctx(clks);
-}
-
-static void dss_clk_disable_all_no_ctx(void)
-{
- enum dss_clock clks;
+ int r;
- clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_VIDFCK;
- dss_clk_disable_no_ctx(clks);
-}
+ DSSDBG("dss_runtime_put\n");
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-/* CLOCKS */
-static void core_dump_clocks(struct seq_file *s)
-{
- int i;
- struct clk *clocks[5] = {
- dss.dss_ick,
- dss.dss_fck,
- dss.dss_sys_clk,
- dss.dss_tv_fck,
- dss.dss_video_fck
- };
-
- const char *names[5] = {
- "ick",
- "fck",
- "sys_clk",
- "tv_fck",
- "video_fck"
- };
-
- seq_printf(s, "- CORE -\n");
-
- seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);
-
- for (i = 0; i < 5; i++) {
- if (!clocks[i])
- continue;
- seq_printf(s, "%s (%s)%*s\t%lu\t%d\n",
- names[i],
- clocks[i]->name,
- 24 - strlen(names[i]) - strlen(clocks[i]->name),
- "",
- clk_get_rate(clocks[i]),
- clocks[i]->usecount);
- }
+ r = pm_runtime_put(&dss.pdev->dev);
+ WARN_ON(r < 0);
}
-#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
/* DEBUGFS */
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
void dss_debug_dump_clocks(struct seq_file *s)
{
- core_dump_clocks(s);
dss_dump_clocks(s);
dispc_dump_clocks(s);
#ifdef CONFIG_OMAP2_DSS_DSI
@@ -1089,28 +729,51 @@ void dss_debug_dump_clocks(struct seq_file *s)
}
#endif
-
/* DSS HW IP initialisation */
static int omap_dsshw_probe(struct platform_device *pdev)
{
+ struct resource *dss_mem;
+ u32 rev;
int r;
dss.pdev = pdev;
+ dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
+ if (!dss_mem) {
+ DSSERR("can't get IORESOURCE_MEM DSS\n");
+ r = -EINVAL;
+ goto err_ioremap;
+ }
+ dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
+ if (!dss.base) {
+ DSSERR("can't ioremap DSS\n");
+ r = -ENOMEM;
+ goto err_ioremap;
+ }
+
r = dss_get_clocks();
if (r)
goto err_clocks;
- dss_clk_enable_all_no_ctx();
+ pm_runtime_enable(&pdev->dev);
- dss.ctx_id = dss_get_ctx_id();
- DSSDBG("initial ctx id %u\n", dss.ctx_id);
+ r = dss_runtime_get();
+ if (r)
+ goto err_runtime_get;
- r = dss_init();
- if (r) {
- DSSERR("Failed to initialize DSS\n");
- goto err_dss;
- }
+ /* Select DPLL */
+ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+ REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
+ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
+ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
+#endif
+ dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
+ dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
+ dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
r = dpi_init();
if (r) {
@@ -1124,42 +787,66 @@ static int omap_dsshw_probe(struct platform_device *pdev)
goto err_sdi;
}
- dss_clk_disable_all_no_ctx();
+ rev = dss_read_reg(DSS_REVISION);
+ printk(KERN_INFO "OMAP DSS rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ dss_runtime_put();
+
return 0;
err_sdi:
dpi_exit();
err_dpi:
- dss_exit();
-err_dss:
- dss_clk_disable_all_no_ctx();
+ dss_runtime_put();
+err_runtime_get:
+ pm_runtime_disable(&pdev->dev);
dss_put_clocks();
err_clocks:
+ iounmap(dss.base);
+err_ioremap:
return r;
}
static int omap_dsshw_remove(struct platform_device *pdev)
{
+ dpi_exit();
+ sdi_exit();
- dss_exit();
+ iounmap(dss.base);
- /*
- * As part of hwmod changes, DSS is not the only controller of dss
- * clocks; hwmod framework itself will also enable clocks during hwmod
- * init for dss, and autoidle is set in h/w for DSS. Hence, there's no
- * need to disable clocks if their usecounts > 1.
- */
- WARN_ON(dss.num_clks_enabled > 0);
+ pm_runtime_disable(&pdev->dev);
dss_put_clocks();
+
+ return 0;
+}
+
+static int dss_runtime_suspend(struct device *dev)
+{
+ dss_save_context();
+ clk_disable(dss.dss_clk);
return 0;
}
+static int dss_runtime_resume(struct device *dev)
+{
+ clk_enable(dss.dss_clk);
+ dss_restore_context();
+ return 0;
+}
+
+static const struct dev_pm_ops dss_pm_ops = {
+ .runtime_suspend = dss_runtime_suspend,
+ .runtime_resume = dss_runtime_resume,
+};
+
static struct platform_driver omap_dsshw_driver = {
.probe = omap_dsshw_probe,
.remove = omap_dsshw_remove,
.driver = {
.name = "omapdss_dss",
.owner = THIS_MODULE,
+ .pm = &dss_pm_ops,
},
};
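
The dss_runtime_suspend()/dss_runtime_resume() pair above is the template each converted IP follows: save register context and gate the functional clock on suspend, do the reverse on resume, and wire both callbacks into dev_pm_ops. A stripped-down sketch for a hypothetical IP block (all names are placeholders, not from the patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm.h>

static struct clk *foo_clk;              /* assumed to be set up in probe */

static void foo_save_context(void) { }   /* placeholder context hooks */
static void foo_restore_context(void) { }

static int foo_hw_runtime_suspend(struct device *dev)
{
        foo_save_context();
        clk_disable(foo_clk);
        return 0;
}

static int foo_hw_runtime_resume(struct device *dev)
{
        clk_enable(foo_clk);
        foo_restore_context();
        return 0;
}

static const struct dev_pm_ops foo_hw_pm_ops = {
        .runtime_suspend = foo_hw_runtime_suspend,
        .runtime_resume = foo_hw_runtime_resume,
};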
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 8ab6d43329b..9c94b1152c2 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -97,26 +97,12 @@ extern unsigned int dss_debug;
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
-enum omap_burst_size {
- OMAP_DSS_BURST_4x32 = 0,
- OMAP_DSS_BURST_8x32 = 1,
- OMAP_DSS_BURST_16x32 = 2,
-};
-
enum omap_parallel_interface_mode {
OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */
OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */
OMAP_DSS_PARALLELMODE_DSI,
};
-enum dss_clock {
- DSS_CLK_ICK = 1 << 0, /* DSS_L3_ICLK and DSS_L4_ICLK */
- DSS_CLK_FCK = 1 << 1, /* DSS1_ALWON_FCLK */
- DSS_CLK_SYSCK = 1 << 2, /* DSS2_ALWON_FCLK */
- DSS_CLK_TVFCK = 1 << 3, /* DSS_TV_FCLK */
- DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/
-};
-
enum dss_hdmi_venc_clk_source_select {
DSS_VENC_TV_CLK = 0,
DSS_HDMI_M_PCLK = 1,
@@ -194,7 +180,7 @@ void dss_uninit_device(struct platform_device *pdev,
bool dss_use_replication(struct omap_dss_device *dssdev,
enum omap_color_mode mode);
void default_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high);
/* manager */
@@ -220,13 +206,12 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
int dss_init_platform_driver(void);
void dss_uninit_platform_driver(void);
+int dss_runtime_get(void);
+void dss_runtime_put(void);
+
+struct clk *dss_get_ick(void);
+
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
-void dss_save_context(void);
-void dss_restore_context(void);
-void dss_clk_enable(enum dss_clock clks);
-void dss_clk_disable(enum dss_clock clks);
-unsigned long dss_clk_get_rate(enum dss_clock clk);
-int dss_need_ctx_restore(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
@@ -283,15 +268,15 @@ struct file_operations;
int dsi_init_platform_driver(void);
void dsi_uninit_platform_driver(void);
+int dsi_runtime_get(struct platform_device *dsidev);
+void dsi_runtime_put(struct platform_device *dsidev);
+
void dsi_dump_clocks(struct seq_file *s);
void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
const struct file_operations *debug_fops);
void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
const struct file_operations *debug_fops);
-void dsi_save_context(void);
-void dsi_restore_context(void);
-
int dsi_init_display(struct omap_dss_device *display);
void dsi_irq_handler(void);
unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
@@ -304,7 +289,7 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
bool enable_hsdiv);
void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high);
void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
@@ -317,6 +302,13 @@ static inline int dsi_init_platform_driver(void)
static inline void dsi_uninit_platform_driver(void)
{
}
+static inline int dsi_runtime_get(struct platform_device *dsidev)
+{
+ return 0;
+}
+static inline void dsi_runtime_put(struct platform_device *dsidev)
+{
+}
static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
{
WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
@@ -384,8 +376,8 @@ void dispc_dump_regs(struct seq_file *s);
void dispc_irq_handler(void);
void dispc_fake_vsync_irq(void);
-void dispc_save_context(void);
-void dispc_restore_context(void);
+int dispc_runtime_get(void);
+void dispc_runtime_put(void);
void dispc_enable_sidle(void);
void dispc_disable_sidle(void);
@@ -398,10 +390,12 @@ void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable);
void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
void dispc_set_digit_size(u16 width, u16 height);
u32 dispc_get_plane_fifo_size(enum omap_plane plane);
-void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high);
+void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
void dispc_enable_fifomerge(bool enable);
-void dispc_set_burst_size(enum omap_plane plane,
- enum omap_burst_size burst_size);
+u32 dispc_get_burst_size(enum omap_plane plane);
+void dispc_enable_cpr(enum omap_channel channel, bool enable);
+void dispc_set_cpr_coef(enum omap_channel channel,
+ struct omap_dss_cpr_coefs *coefs);
void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr);
void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr);
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 1c18888e5df..b415c4ee621 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -49,6 +49,9 @@ struct omap_dss_features {
const enum omap_color_mode *supported_color_modes;
const char * const *clksrc_names;
const struct dss_param_range *dss_params;
+
+ const u32 buffer_size_unit;
+ const u32 burst_size_unit;
};
/* This struct is assigned to one of the below during initialization */
@@ -274,6 +277,8 @@ static const struct omap_dss_features omap2_dss_features = {
.supported_color_modes = omap2_dss_supported_color_modes,
.clksrc_names = omap2_dss_clk_source_names,
.dss_params = omap2_dss_param_range,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
};
/* OMAP3 DSS Features */
@@ -286,7 +291,9 @@ static const struct omap_dss_features omap3430_dss_features = {
FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF |
- FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC,
+ FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC |
+ FEAT_VENC_REQUIRES_TV_DAC_CLK | FEAT_CPR | FEAT_PRELOAD |
+ FEAT_FIR_COEF_V,
.num_mgrs = 2,
.num_ovls = 3,
@@ -294,6 +301,8 @@ static const struct omap_dss_features omap3430_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
};
static const struct omap_dss_features omap3630_dss_features = {
@@ -306,7 +315,8 @@ static const struct omap_dss_features omap3630_dss_features = {
FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG |
- FEAT_DSI_PLL_FREQSEL,
+ FEAT_DSI_PLL_FREQSEL | FEAT_CPR | FEAT_PRELOAD |
+ FEAT_FIR_COEF_V,
.num_mgrs = 2,
.num_ovls = 3,
@@ -314,6 +324,8 @@ static const struct omap_dss_features omap3630_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
};
/* OMAP4 DSS Features */
@@ -327,7 +339,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
- FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
+ FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 |
+ FEAT_CPR | FEAT_PRELOAD | FEAT_FIR_COEF_V,
.num_mgrs = 3,
.num_ovls = 3,
@@ -335,6 +348,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
.supported_color_modes = omap4_dss_supported_color_modes,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .buffer_size_unit = 16,
+ .burst_size_unit = 16,
};
/* For all the other OMAP4 versions */
@@ -348,7 +363,8 @@ static const struct omap_dss_features omap4_dss_features = {
FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE |
- FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
+ FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 | FEAT_CPR |
+ FEAT_PRELOAD | FEAT_FIR_COEF_V,
.num_mgrs = 3,
.num_ovls = 3,
@@ -356,6 +372,8 @@ static const struct omap_dss_features omap4_dss_features = {
.supported_color_modes = omap4_dss_supported_color_modes,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .buffer_size_unit = 16,
+ .burst_size_unit = 16,
};
/* Functions returning values related to a DSS feature */
@@ -401,6 +419,16 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
return omap_current_dss_features->clksrc_names[id];
}
+u32 dss_feat_get_buffer_size_unit(void)
+{
+ return omap_current_dss_features->buffer_size_unit;
+}
+
+u32 dss_feat_get_burst_size_unit(void)
+{
+ return omap_current_dss_features->burst_size_unit;
+}
+
/* DSS has_feature check */
bool dss_has_feature(enum dss_feat_id id)
{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 07b346f7d91..b7398cbcda5 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -51,6 +51,10 @@ enum dss_feat_id {
FEAT_HDMI_CTS_SWMODE = 1 << 19,
FEAT_HANDLE_UV_SEPARATE = 1 << 20,
FEAT_ATTR2 = 1 << 21,
+ FEAT_VENC_REQUIRES_TV_DAC_CLK = 1 << 22,
+ FEAT_CPR = 1 << 23,
+ FEAT_PRELOAD = 1 << 24,
+ FEAT_FIR_COEF_V = 1 << 25,
};
/* DSS register field id */
@@ -90,6 +94,9 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode);
const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
+u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
+u32 dss_feat_get_burst_size_unit(void); /* in bytes */
+
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
void dss_features_init(void);
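
The new FEAT_* bits and size-unit getters let callers probe per-SoC capabilities instead of hard-coding them. A small sketch of feature-gated CPR programming, assuming it sits inside the omapdss driver next to the headers above and using only functions they declare (example_apply_cpr itself is hypothetical):

static void example_apply_cpr(enum omap_channel channel,
                struct omap_dss_cpr_coefs *coefs)
{
        /* CPR exists only on SoCs that advertise FEAT_CPR */
        if (!dss_has_feature(FEAT_CPR))
                return;

        dispc_set_cpr_coef(channel, coefs);
        dispc_enable_cpr(channel, true);
}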
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index b0555f4f0a7..256f27a9064 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -29,6 +29,9 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
#include <video/omapdss.h>
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
@@ -51,6 +54,9 @@ static struct {
u8 edid_set;
bool custom_set;
struct hdmi_config cfg;
+
+ struct clk *sys_clk;
+ struct clk *hdmi_clk;
} hdmi;
/*
@@ -162,6 +168,27 @@ static inline int hdmi_wait_for_bit_change(const struct hdmi_reg idx,
return val;
}
+static int hdmi_runtime_get(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&hdmi.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void hdmi_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_put\n");
+
+ r = pm_runtime_put(&hdmi.pdev->dev);
+ WARN_ON(r < 0);
+}
+
int hdmi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
@@ -311,30 +338,11 @@ static int hdmi_phy_init(void)
return 0;
}
-static int hdmi_wait_softreset(void)
-{
- /* reset W1 */
- REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0);
-
- /* wait till SOFTRESET == 0 */
- if (hdmi_wait_for_bit_change(HDMI_WP_SYSCONFIG, 0, 0, 0) != 0) {
- DSSERR("sysconfig reset failed\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
static int hdmi_pll_program(struct hdmi_pll_info *fmt)
{
u16 r = 0;
enum hdmi_clk_refsel refsel;
- /* wait for wrapper reset */
- r = hdmi_wait_softreset();
- if (r)
- return r;
-
r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
if (r)
return r;
@@ -1064,7 +1072,7 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
unsigned long clkin, refclk;
u32 mf;
- clkin = dss_clk_get_rate(DSS_CLK_SYSCK) / 10000;
+ clkin = clk_get_rate(hdmi.sys_clk) / 10000;
/*
* Input clock is predivided by N + 1
* out put of which is reference clk
@@ -1098,16 +1106,6 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
}
-static void hdmi_enable_clocks(int enable)
-{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK |
- DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK |
- DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
-}
-
static int hdmi_power_on(struct omap_dss_device *dssdev)
{
int r, code = 0;
@@ -1115,7 +1113,9 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
struct omap_video_timings *p;
unsigned long phy;
- hdmi_enable_clocks(1);
+ r = hdmi_runtime_get();
+ if (r)
+ return r;
dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
@@ -1180,7 +1180,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
return 0;
err:
- hdmi_enable_clocks(0);
+ hdmi_runtime_put();
return -EIO;
}
@@ -1191,7 +1191,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
hdmi_wp_video_start(0);
hdmi_phy_off();
hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
- hdmi_enable_clocks(0);
+ hdmi_runtime_put();
hdmi.edid_set = 0;
}
@@ -1686,14 +1686,43 @@ static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
};
#endif
+static int hdmi_get_clocks(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ return PTR_ERR(clk);
+ }
+
+ hdmi.sys_clk = clk;
+
+ clk = clk_get(&pdev->dev, "dss_48mhz_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get hdmi_clk\n");
+ clk_put(hdmi.sys_clk);
+ return PTR_ERR(clk);
+ }
+
+ hdmi.hdmi_clk = clk;
+
+ return 0;
+}
+
+static void hdmi_put_clocks(void)
+{
+ if (hdmi.sys_clk)
+ clk_put(hdmi.sys_clk);
+ if (hdmi.hdmi_clk)
+ clk_put(hdmi.hdmi_clk);
+}
+
/* HDMI HW IP initialisation */
static int omapdss_hdmihw_probe(struct platform_device *pdev)
{
struct resource *hdmi_mem;
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
- int ret;
-#endif
+ int r;
hdmi.pdata = pdev->dev.platform_data;
hdmi.pdev = pdev;
@@ -1713,17 +1742,25 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ r = hdmi_get_clocks(pdev);
+ if (r) {
+ iounmap(hdmi.base_wp);
+ return r;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
hdmi_panel_init();
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
/* Register ASoC codec DAI */
- ret = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
+ r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
&hdmi_codec_dai_drv, 1);
- if (ret) {
+ if (r) {
DSSERR("can't register ASoC HDMI audio codec\n");
- return ret;
+ return r;
}
#endif
return 0;
@@ -1738,17 +1775,62 @@ static int omapdss_hdmihw_remove(struct platform_device *pdev)
snd_soc_unregister_codec(&pdev->dev);
#endif
+ pm_runtime_disable(&pdev->dev);
+
+ hdmi_put_clocks();
+
iounmap(hdmi.base_wp);
return 0;
}
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ clk_disable(hdmi.hdmi_clk);
+ clk_disable(hdmi.sys_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ goto err_get_dispc;
+
+
+ clk_enable(hdmi.sys_clk);
+ clk_enable(hdmi.hdmi_clk);
+
+ return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
static struct platform_driver omapdss_hdmihw_driver = {
.probe = omapdss_hdmihw_probe,
.remove = omapdss_hdmihw_remove,
.driver = {
.name = "omapdss_hdmi",
.owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
},
};
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 9aeea50e33f..13d72d5c714 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -275,6 +275,108 @@ static ssize_t manager_alpha_blending_enabled_store(
return size;
}
+static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.cpr_enable);
+}
+
+static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ int v;
+ int r;
+ bool enable;
+
+ if (!dss_has_feature(FEAT_CPR))
+ return -ENODEV;
+
+ r = kstrtoint(buf, 0, &v);
+ if (r)
+ return r;
+
+ enable = !!v;
+
+ mgr->get_manager_info(mgr, &info);
+
+ if (info.cpr_enable == enable)
+ return size;
+
+ info.cpr_enable = enable;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE,
+ "%d %d %d %d %d %d %d %d %d\n",
+ info.cpr_coefs.rr,
+ info.cpr_coefs.rg,
+ info.cpr_coefs.rb,
+ info.cpr_coefs.gr,
+ info.cpr_coefs.gg,
+ info.cpr_coefs.gb,
+ info.cpr_coefs.br,
+ info.cpr_coefs.bg,
+ info.cpr_coefs.bb);
+}
+
+static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ struct omap_dss_cpr_coefs coefs;
+ int r, i;
+ s16 *arr;
+
+ if (!dss_has_feature(FEAT_CPR))
+ return -ENODEV;
+
+ if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd",
+ &coefs.rr, &coefs.rg, &coefs.rb,
+ &coefs.gr, &coefs.gg, &coefs.gb,
+ &coefs.br, &coefs.bg, &coefs.bb) != 9)
+ return -EINVAL;
+
+ arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
+ coefs.gr, coefs.gg, coefs.gb,
+ coefs.br, coefs.bg, coefs.bb };
+
+ for (i = 0; i < 9; ++i) {
+ if (arr[i] < -512 || arr[i] > 511)
+ return -EINVAL;
+ }
+
+ mgr->get_manager_info(mgr, &info);
+
+ info.cpr_coefs = coefs;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
struct manager_attribute {
struct attribute attr;
ssize_t (*show)(struct omap_overlay_manager *, char *);
@@ -300,6 +402,12 @@ static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
manager_alpha_blending_enabled_show,
manager_alpha_blending_enabled_store);
+static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR,
+ manager_cpr_enable_show,
+ manager_cpr_enable_store);
+static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR,
+ manager_cpr_coef_show,
+ manager_cpr_coef_store);
static struct attribute *manager_sysfs_attrs[] = {
@@ -310,6 +418,8 @@ static struct attribute *manager_sysfs_attrs[] = {
&manager_attr_trans_key_value.attr,
&manager_attr_trans_key_enabled.attr,
&manager_attr_alpha_blending_enabled.attr,
+ &manager_attr_cpr_enable.attr,
+ &manager_attr_cpr_coef.attr,
NULL
};
@@ -391,33 +501,14 @@ struct overlay_cache_data {
bool enabled;
- u32 paddr;
- void __iomem *vaddr;
- u32 p_uv_addr; /* relevant for NV12 format only */
- u16 screen_width;
- u16 width;
- u16 height;
- enum omap_color_mode color_mode;
- u8 rotation;
- enum omap_dss_rotation_type rotation_type;
- bool mirror;
-
- u16 pos_x;
- u16 pos_y;
- u16 out_width; /* if 0, out_width == width */
- u16 out_height; /* if 0, out_height == height */
- u8 global_alpha;
- u8 pre_mult_alpha;
+ struct omap_overlay_info info;
enum omap_channel channel;
bool replication;
bool ilace;
- enum omap_burst_size burst_size;
u32 fifo_low;
u32 fifo_high;
-
- bool manual_update;
};
struct manager_cache_data {
@@ -429,15 +520,8 @@ struct manager_cache_data {
* VSYNC/EVSYNC */
bool shadow_dirty;
- u32 default_color;
-
- enum omap_dss_trans_key_type trans_key_type;
- u32 trans_key;
- bool trans_enabled;
-
- bool alpha_enabled;
+ struct omap_overlay_manager_info info;
- bool manual_upd_display;
bool manual_update;
bool do_manual_update;
@@ -539,24 +623,15 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+ return 0;
+
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
} else {
- if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- enum omap_dss_update_mode mode;
- mode = dssdev->driver->get_update_mode(dssdev);
- if (mode != OMAP_DSS_UPDATE_AUTO)
- return 0;
-
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_FRAMEDONE
- : DISPC_IRQ_FRAMEDONE2;
- } else {
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_VSYNC
- : DISPC_IRQ_VSYNC2;
- }
+ irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
}
mc = &dss_cache.manager_cache[mgr->id];
@@ -617,24 +692,15 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+ return 0;
+
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
} else {
- if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- enum omap_dss_update_mode mode;
- mode = dssdev->driver->get_update_mode(dssdev);
- if (mode != OMAP_DSS_UPDATE_AUTO)
- return 0;
-
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_FRAMEDONE
- : DISPC_IRQ_FRAMEDONE2;
- } else {
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_VSYNC
- : DISPC_IRQ_VSYNC2;
- }
+ irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
}
oc = &dss_cache.overlay_cache[ovl->id];
@@ -720,10 +786,12 @@ static bool rectangle_intersects(int x1, int y1, int w1, int h1,
static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc)
{
- if (oc->out_width != 0 && oc->width != oc->out_width)
+ struct omap_overlay_info *oi = &oc->info;
+
+ if (oi->out_width != 0 && oi->width != oi->out_width)
return true;
- if (oc->out_height != 0 && oc->height != oc->out_height)
+ if (oi->out_height != 0 && oi->height != oi->out_height)
return true;
return false;
@@ -733,6 +801,8 @@ static int configure_overlay(enum omap_plane plane)
{
struct overlay_cache_data *c;
struct manager_cache_data *mc;
+ struct omap_overlay_info *oi;
+ struct omap_overlay_manager_info *mi;
u16 outw, outh;
u16 x, y, w, h;
u32 paddr;
@@ -742,6 +812,7 @@ static int configure_overlay(enum omap_plane plane)
DSSDBGF("%d", plane);
c = &dss_cache.overlay_cache[plane];
+ oi = &c->info;
if (!c->enabled) {
dispc_enable_plane(plane, 0);
@@ -749,21 +820,22 @@ static int configure_overlay(enum omap_plane plane)
}
mc = &dss_cache.manager_cache[c->channel];
+ mi = &mc->info;
- x = c->pos_x;
- y = c->pos_y;
- w = c->width;
- h = c->height;
- outw = c->out_width == 0 ? c->width : c->out_width;
- outh = c->out_height == 0 ? c->height : c->out_height;
- paddr = c->paddr;
+ x = oi->pos_x;
+ y = oi->pos_y;
+ w = oi->width;
+ h = oi->height;
+ outw = oi->out_width == 0 ? oi->width : oi->out_width;
+ outh = oi->out_height == 0 ? oi->height : oi->out_height;
+ paddr = oi->paddr;
orig_w = w;
orig_h = h;
orig_outw = outw;
orig_outh = outh;
- if (c->manual_update && mc->do_manual_update) {
+ if (mc->manual_update && mc->do_manual_update) {
unsigned bpp;
unsigned scale_x_m = w, scale_x_d = outw;
unsigned scale_y_m = h, scale_y_d = outh;
@@ -775,7 +847,7 @@ static int configure_overlay(enum omap_plane plane)
return 0;
}
- switch (c->color_mode) {
+ switch (oi->color_mode) {
case OMAP_DSS_COLOR_NV12:
bpp = 8;
break;
@@ -805,23 +877,23 @@ static int configure_overlay(enum omap_plane plane)
BUG();
}
- if (mc->x > c->pos_x) {
+ if (mc->x > oi->pos_x) {
x = 0;
- outw -= (mc->x - c->pos_x);
- paddr += (mc->x - c->pos_x) *
+ outw -= (mc->x - oi->pos_x);
+ paddr += (mc->x - oi->pos_x) *
scale_x_m / scale_x_d * bpp / 8;
} else {
- x = c->pos_x - mc->x;
+ x = oi->pos_x - mc->x;
}
- if (mc->y > c->pos_y) {
+ if (mc->y > oi->pos_y) {
y = 0;
- outh -= (mc->y - c->pos_y);
- paddr += (mc->y - c->pos_y) *
+ outh -= (mc->y - oi->pos_y);
+ paddr += (mc->y - oi->pos_y) *
scale_y_m / scale_y_d *
- c->screen_width * bpp / 8;
+ oi->screen_width * bpp / 8;
} else {
- y = c->pos_y - mc->y;
+ y = oi->pos_y - mc->y;
}
if (mc->w < (x + outw))
@@ -840,8 +912,8 @@ static int configure_overlay(enum omap_plane plane)
* the width if the original width was bigger.
*/
if ((w & 1) &&
- (c->color_mode == OMAP_DSS_COLOR_YUV2 ||
- c->color_mode == OMAP_DSS_COLOR_UYVY)) {
+ (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
+ oi->color_mode == OMAP_DSS_COLOR_UYVY)) {
if (orig_w > w)
w += 1;
else
@@ -851,19 +923,19 @@ static int configure_overlay(enum omap_plane plane)
r = dispc_setup_plane(plane,
paddr,
- c->screen_width,
+ oi->screen_width,
x, y,
w, h,
outw, outh,
- c->color_mode,
+ oi->color_mode,
c->ilace,
- c->rotation_type,
- c->rotation,
- c->mirror,
- c->global_alpha,
- c->pre_mult_alpha,
+ oi->rotation_type,
+ oi->rotation,
+ oi->mirror,
+ oi->global_alpha,
+ oi->pre_mult_alpha,
c->channel,
- c->p_uv_addr);
+ oi->p_uv_addr);
if (r) {
/* this shouldn't happen */
@@ -874,8 +946,7 @@ static int configure_overlay(enum omap_plane plane)
dispc_enable_replication(plane, c->replication);
- dispc_set_burst_size(plane, c->burst_size);
- dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high);
+ dispc_set_fifo_threshold(plane, c->fifo_low, c->fifo_high);
dispc_enable_plane(plane, 1);
@@ -884,16 +955,21 @@ static int configure_overlay(enum omap_plane plane)
static void configure_manager(enum omap_channel channel)
{
- struct manager_cache_data *c;
+ struct omap_overlay_manager_info *mi;
DSSDBGF("%d", channel);
- c = &dss_cache.manager_cache[channel];
+ /* picking info from the cache */
+ mi = &dss_cache.manager_cache[channel].info;
- dispc_set_default_color(channel, c->default_color);
- dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
- dispc_enable_trans_key(channel, c->trans_enabled);
- dispc_enable_alpha_blending(channel, c->alpha_enabled);
+ dispc_set_default_color(channel, mi->default_color);
+ dispc_set_trans_key(channel, mi->trans_key_type, mi->trans_key);
+ dispc_enable_trans_key(channel, mi->trans_enabled);
+ dispc_enable_alpha_blending(channel, mi->alpha_enabled);
+ if (dss_has_feature(FEAT_CPR)) {
+ dispc_enable_cpr(channel, mi->cpr_enable);
+ dispc_set_cpr_coef(channel, &mi->cpr_coefs);
+ }
}
/* configure_dispc() tries to write values from cache to shadow registers.
@@ -928,7 +1004,7 @@ static int configure_dispc(void)
if (!oc->dirty)
continue;
- if (oc->manual_update && !mc->do_manual_update)
+ if (mc->manual_update && !mc->do_manual_update)
continue;
if (mgr_busy[oc->channel]) {
@@ -976,7 +1052,7 @@ static int configure_dispc(void)
/* We don't need GO with manual update display. LCD iface will
* always be turned off after frame, and new settings will be
* taken in to use at next update */
- if (!mc->manual_upd_display)
+ if (!mc->manual_update)
dispc_go(i);
}
@@ -1011,6 +1087,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
{
struct overlay_cache_data *oc;
struct manager_cache_data *mc;
+ struct omap_overlay_info *oi;
const int num_ovls = dss_feat_get_num_ovls();
struct omap_overlay_manager *mgr;
int i;
@@ -1053,6 +1130,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
unsigned outw, outh;
oc = &dss_cache.overlay_cache[i];
+ oi = &oc->info;
if (oc->channel != mgr->id)
continue;
@@ -1068,39 +1146,39 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
if (!dispc_is_overlay_scaled(oc))
continue;
- outw = oc->out_width == 0 ?
- oc->width : oc->out_width;
- outh = oc->out_height == 0 ?
- oc->height : oc->out_height;
+ outw = oi->out_width == 0 ?
+ oi->width : oi->out_width;
+ outh = oi->out_height == 0 ?
+ oi->height : oi->out_height;
/* is the overlay outside the update region? */
if (!rectangle_intersects(x, y, w, h,
- oc->pos_x, oc->pos_y,
+ oi->pos_x, oi->pos_y,
outw, outh))
continue;
/* is the overlay totally inside the update region? */
- if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh,
+ if (rectangle_subset(oi->pos_x, oi->pos_y, outw, outh,
x, y, w, h))
continue;
- if (x > oc->pos_x)
- x1 = oc->pos_x;
+ if (x > oi->pos_x)
+ x1 = oi->pos_x;
else
x1 = x;
- if (y > oc->pos_y)
- y1 = oc->pos_y;
+ if (y > oi->pos_y)
+ y1 = oi->pos_y;
else
y1 = y;
- if ((x + w) < (oc->pos_x + outw))
- x2 = oc->pos_x + outw;
+ if ((x + w) < (oi->pos_x + outw))
+ x2 = oi->pos_x + outw;
else
x2 = x + w;
- if ((y + h) < (oc->pos_y + outh))
- y2 = oc->pos_y + outh;
+ if ((y + h) < (oi->pos_y + outh))
+ y2 = oi->pos_y + outh;
else
y2 = y + h;
@@ -1236,6 +1314,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
spin_lock_irqsave(&dss_cache.lock, flags);
/* Configure overlays */
@@ -1275,23 +1357,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
ovl->info_dirty = false;
oc->dirty = true;
-
- oc->paddr = ovl->info.paddr;
- oc->vaddr = ovl->info.vaddr;
- oc->p_uv_addr = ovl->info.p_uv_addr;
- oc->screen_width = ovl->info.screen_width;
- oc->width = ovl->info.width;
- oc->height = ovl->info.height;
- oc->color_mode = ovl->info.color_mode;
- oc->rotation = ovl->info.rotation;
- oc->rotation_type = ovl->info.rotation_type;
- oc->mirror = ovl->info.mirror;
- oc->pos_x = ovl->info.pos_x;
- oc->pos_y = ovl->info.pos_y;
- oc->out_width = ovl->info.out_width;
- oc->out_height = ovl->info.out_height;
- oc->global_alpha = ovl->info.global_alpha;
- oc->pre_mult_alpha = ovl->info.pre_mult_alpha;
+ oc->info = ovl->info;
oc->replication =
dss_use_replication(dssdev, ovl->info.color_mode);
@@ -1302,11 +1368,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
oc->enabled = true;
- oc->manual_update =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
- dssdev->driver->get_update_mode(dssdev) !=
- OMAP_DSS_UPDATE_AUTO;
-
++num_planes_enabled;
}
@@ -1334,20 +1395,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
mgr->info_dirty = false;
mc->dirty = true;
-
- mc->default_color = mgr->info.default_color;
- mc->trans_key_type = mgr->info.trans_key_type;
- mc->trans_key = mgr->info.trans_key;
- mc->trans_enabled = mgr->info.trans_enabled;
- mc->alpha_enabled = mgr->info.alpha_enabled;
-
- mc->manual_upd_display =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+ mc->info = mgr->info;
mc->manual_update =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
- dssdev->driver->get_update_mode(dssdev) !=
- OMAP_DSS_UPDATE_AUTO;
+ dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}
/* XXX TODO: Try to get fifomerge working. The problem is that it
@@ -1368,7 +1419,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
/* Configure overlay fifos */
for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
struct omap_dss_device *dssdev;
- u32 size;
+ u32 size, burst_size;
ovl = omap_dss_get_overlay(i);
@@ -1386,6 +1437,8 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
if (use_fifomerge)
size *= 3;
+ burst_size = dispc_get_burst_size(ovl->id);
+
switch (dssdev->type) {
case OMAP_DISPLAY_TYPE_DPI:
case OMAP_DISPLAY_TYPE_DBI:
@@ -1393,13 +1446,13 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_HDMI:
default_get_overlay_fifo_thresholds(ovl->id, size,
- &oc->burst_size, &oc->fifo_low,
+ burst_size, &oc->fifo_low,
&oc->fifo_high);
break;
#ifdef CONFIG_OMAP2_DSS_DSI
case OMAP_DISPLAY_TYPE_DSI:
dsi_get_overlay_fifo_thresholds(ovl->id, size,
- &oc->burst_size, &oc->fifo_low,
+ burst_size, &oc->fifo_low,
&oc->fifo_high);
break;
#endif
@@ -1409,7 +1462,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
}
r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
if (!dss_cache.irq_enabled) {
u32 mask;
@@ -1422,10 +1474,11 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
dss_cache.irq_enabled = true;
}
configure_dispc();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
spin_unlock_irqrestore(&dss_cache.lock, flags);
+ dispc_runtime_put();
+
return r;
}
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 0f08025b1f0..c84380c53c3 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -84,32 +84,42 @@ static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
old_mgr = ovl->manager;
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
/* detach old manager */
if (old_mgr) {
r = ovl->unset_manager(ovl);
if (r) {
DSSERR("detach failed\n");
- return r;
+ goto err;
}
r = old_mgr->apply(old_mgr);
if (r)
- return r;
+ goto err;
}
if (mgr) {
r = ovl->set_manager(ovl, mgr);
if (r) {
DSSERR("Failed to attach overlay\n");
- return r;
+ goto err;
}
r = mgr->apply(mgr);
if (r)
- return r;
+ goto err;
}
+ dispc_runtime_put();
+
return size;
+
+err:
+ dispc_runtime_put();
+ return r;
}
static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
@@ -238,6 +248,9 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
u8 alpha;
struct omap_overlay_info info;
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
+ return -ENODEV;
+
r = kstrtou8(buf, 0, &alpha);
if (r)
return r;
@@ -504,7 +517,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
ovl->manager = mgr;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
/* XXX: When there is an overlay on a DSI manual update display, and
* the overlay is first disabled, then moved to tv, and enabled, we
* seem to get SYNC_LOST_DIGIT error.
@@ -518,7 +530,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
* the overlay, but before moving the overlay to TV.
*/
dispc_set_channel_out(ovl->id, mgr->id);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
}
@@ -719,6 +730,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
}
if (mgr) {
+ dispc_runtime_get();
+
for (i = 0; i < dss_feat_get_num_ovls(); i++) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
@@ -728,6 +741,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
omap_dss_set_manager(ovl, mgr);
}
}
+
+ dispc_runtime_put();
}
}
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index c06fbe0bc67..39f4c597026 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -33,6 +33,8 @@
#include <linux/hrtimer.h>
#include <linux/seq_file.h>
#include <linux/semaphore.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include "dss.h"
@@ -120,12 +122,25 @@ static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
return __raw_readl(rfbi.base + idx.idx);
}
-static void rfbi_enable_clocks(bool enable)
+static int rfbi_runtime_get(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+
+ DSSDBG("rfbi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&rfbi.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void rfbi_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("rfbi_runtime_put\n");
+
+ r = pm_runtime_put(&rfbi.pdev->dev);
+ WARN_ON(r < 0);
}
void rfbi_bus_lock(void)
@@ -805,7 +820,8 @@ void rfbi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (rfbi_runtime_get())
+ return;
DUMPREG(RFBI_REVISION);
DUMPREG(RFBI_SYSCONFIG);
@@ -836,7 +852,7 @@ void rfbi_dump_regs(struct seq_file *s)
DUMPREG(RFBI_VSYNC_WIDTH);
DUMPREG(RFBI_HSYNC_WIDTH);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ rfbi_runtime_put();
#undef DUMPREG
}
@@ -844,7 +860,9 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
{
int r;
- rfbi_enable_clocks(1);
+ r = rfbi_runtime_get();
+ if (r)
+ return r;
r = omap_dss_start_device(dssdev);
if (r) {
@@ -879,6 +897,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
err1:
omap_dss_stop_device(dssdev);
err0:
+ rfbi_runtime_put();
return r;
}
EXPORT_SYMBOL(omapdss_rfbi_display_enable);
@@ -889,7 +908,7 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
DISPC_IRQ_FRAMEDONE);
omap_dss_stop_device(dssdev);
- rfbi_enable_clocks(0);
+ rfbi_runtime_put();
}
EXPORT_SYMBOL(omapdss_rfbi_display_disable);
@@ -904,8 +923,9 @@ int rfbi_init_display(struct omap_dss_device *dssdev)
static int omap_rfbihw_probe(struct platform_device *pdev)
{
u32 rev;
- u32 l;
struct resource *rfbi_mem;
+ struct clk *clk;
+ int r;
rfbi.pdev = pdev;
@@ -914,46 +934,102 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
if (!rfbi_mem) {
DSSERR("can't get IORESOURCE_MEM RFBI\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto err_ioremap;
}
rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem));
if (!rfbi.base) {
DSSERR("can't ioremap RFBI\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto err_ioremap;
}
- rfbi_enable_clocks(1);
+ pm_runtime_enable(&pdev->dev);
+
+ r = rfbi_runtime_get();
+ if (r)
+ goto err_get_rfbi;
msleep(10);
- rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
+ if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap3630())
+ clk = dss_get_ick();
+ else
+ clk = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get ick\n");
+ r = PTR_ERR(clk);
+ goto err_get_ick;
+ }
+
+ rfbi.l4_khz = clk_get_rate(clk) / 1000;
- /* Enable autoidle and smart-idle */
- l = rfbi_read_reg(RFBI_SYSCONFIG);
- l |= (1 << 0) | (2 << 3);
- rfbi_write_reg(RFBI_SYSCONFIG, l);
+ clk_put(clk);
rev = rfbi_read_reg(RFBI_REVISION);
dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- rfbi_enable_clocks(0);
+ rfbi_runtime_put();
return 0;
+
+err_get_ick:
+ rfbi_runtime_put();
+err_get_rfbi:
+ pm_runtime_disable(&pdev->dev);
+ iounmap(rfbi.base);
+err_ioremap:
+ return r;
}
static int omap_rfbihw_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
iounmap(rfbi.base);
return 0;
}
+static int rfbi_runtime_suspend(struct device *dev)
+{
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int rfbi_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ goto err_get_dispc;
+
+ return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static const struct dev_pm_ops rfbi_pm_ops = {
+ .runtime_suspend = rfbi_runtime_suspend,
+ .runtime_resume = rfbi_runtime_resume,
+};
+
static struct platform_driver omap_rfbihw_driver = {
.probe = omap_rfbihw_probe,
.remove = omap_rfbihw_remove,
.driver = {
.name = "omapdss_rfbi",
.owner = THIS_MODULE,
+ .pm = &rfbi_pm_ops,
},
};
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 0bd4b0350f8..3a688c871a4 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -20,13 +20,11 @@
#define DSS_SUBSYS_NAME "SDI"
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <video/omapdss.h>
-#include <plat/cpu.h>
#include "dss.h"
static struct {
@@ -60,14 +58,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
r = regulator_enable(sdi.vdds_sdi_reg);
if (r)
- goto err1;
+ goto err_reg_enable;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
sdi_basic_init(dssdev);
@@ -80,7 +84,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
r = dss_calc_clock_div(1, t->pixel_clock * 1000,
&dss_cinfo, &dispc_cinfo);
if (r)
- goto err2;
+ goto err_calc_clock_div;
fck = dss_cinfo.fck;
lck_div = dispc_cinfo.lck_div;
@@ -101,27 +105,34 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
r = dss_set_clock_div(&dss_cinfo);
if (r)
- goto err2;
+ goto err_set_dss_clock_div;
r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
- goto err2;
+ goto err_set_dispc_clock_div;
dss_sdi_init(dssdev->phy.sdi.datapairs);
r = dss_sdi_enable();
if (r)
- goto err1;
+ goto err_sdi_enable;
mdelay(2);
dssdev->manager->enable(dssdev->manager);
return 0;
-err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+
+err_sdi_enable:
+err_set_dispc_clock_div:
+err_set_dss_clock_div:
+err_calc_clock_div:
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
regulator_disable(sdi.vdds_sdi_reg);
-err1:
+err_reg_enable:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
return r;
}
EXPORT_SYMBOL(omapdss_sdi_display_enable);
@@ -132,7 +143,8 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
dss_sdi_disable();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
+ dss_runtime_put();
regulator_disable(sdi.vdds_sdi_reg);
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 980f919ed98..173c66430da 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -33,11 +33,13 @@
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/cpu.h>
#include "dss.h"
+#include "dss_features.h"
/* Venc registers */
#define VENC_REV_ID 0x00
@@ -292,6 +294,9 @@ static struct {
struct mutex venc_lock;
u32 wss_data;
struct regulator *vdda_dac_reg;
+
+ struct clk *tv_clk;
+ struct clk *tv_dac_clk;
} venc;
static inline void venc_write_reg(int idx, u32 val)
@@ -380,14 +385,25 @@ static void venc_reset(void)
#endif
}
-static void venc_enable_clocks(int enable)
+static int venc_runtime_get(void)
+{
+ int r;
+
+ DSSDBG("venc_runtime_get\n");
+
+ r = pm_runtime_get_sync(&venc.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void venc_runtime_put(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
- DSS_CLK_VIDFCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
- DSS_CLK_VIDFCK);
+ int r;
+
+ DSSDBG("venc_runtime_put\n");
+
+ r = pm_runtime_put(&venc.pdev->dev);
+ WARN_ON(r < 0);
}
static const struct venc_config *venc_timings_to_config(
@@ -406,8 +422,6 @@ static void venc_power_on(struct omap_dss_device *dssdev)
{
u32 l;
- venc_enable_clocks(1);
-
venc_reset();
venc_write_config(venc_timings_to_config(&dssdev->panel.timings));
@@ -448,8 +462,6 @@ static void venc_power_off(struct omap_dss_device *dssdev)
dssdev->platform_disable(dssdev);
regulator_disable(venc.vdda_dac_reg);
-
- venc_enable_clocks(0);
}
@@ -487,6 +499,10 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
goto err1;
}
+ r = venc_runtime_get();
+ if (r)
+ goto err1;
+
venc_power_on(dssdev);
venc.wss_data = 0;
@@ -520,6 +536,8 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
venc_power_off(dssdev);
+ venc_runtime_put();
+
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
omap_dss_stop_device(dssdev);
@@ -538,20 +556,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
return venc_panel_enable(dssdev);
}
-static enum omap_dss_update_mode venc_get_update_mode(
- struct omap_dss_device *dssdev)
-{
- return OMAP_DSS_UPDATE_AUTO;
-}
-
-static int venc_set_update_mode(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode mode)
-{
- if (mode != OMAP_DSS_UPDATE_AUTO)
- return -EINVAL;
- return 0;
-}
-
static void venc_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -598,6 +602,7 @@ static u32 venc_get_wss(struct omap_dss_device *dssdev)
static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
{
const struct venc_config *config;
+ int r;
DSSDBG("venc_set_wss\n");
@@ -608,16 +613,19 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
/* Invert due to VENC_L21_WC_CTL:INV=1 */
venc.wss_data = (wss ^ 0xfffff) << 8;
- venc_enable_clocks(1);
+ r = venc_runtime_get();
+ if (r)
+ goto err;
venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
venc.wss_data);
- venc_enable_clocks(0);
+ venc_runtime_put();
+err:
mutex_unlock(&venc.venc_lock);
- return 0;
+ return r;
}
static struct omap_dss_driver venc_driver = {
@@ -632,9 +640,6 @@ static struct omap_dss_driver venc_driver = {
.get_resolution = omapdss_default_get_resolution,
.get_recommended_bpp = omapdss_default_get_recommended_bpp,
- .set_update_mode = venc_set_update_mode,
- .get_update_mode = venc_get_update_mode,
-
.get_timings = venc_get_timings,
.set_timings = venc_set_timings,
.check_timings = venc_check_timings,
@@ -673,7 +678,8 @@ void venc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
- venc_enable_clocks(1);
+ if (venc_runtime_get())
+ return;
DUMPREG(VENC_F_CONTROL);
DUMPREG(VENC_VIDOUT_CTRL);
@@ -717,16 +723,56 @@ void venc_dump_regs(struct seq_file *s)
DUMPREG(VENC_OUTPUT_CONTROL);
DUMPREG(VENC_OUTPUT_TEST);
- venc_enable_clocks(0);
+ venc_runtime_put();
#undef DUMPREG
}
+static int venc_get_clocks(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get fck\n");
+ return PTR_ERR(clk);
+ }
+
+ venc.tv_clk = clk;
+
+ if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
+ if (cpu_is_omap34xx() || cpu_is_omap3630())
+ clk = clk_get(&pdev->dev, "dss_96m_fck");
+ else
+ clk = clk_get(&pdev->dev, "tv_dac_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get tv_dac_clk\n");
+ clk_put(venc.tv_clk);
+ return PTR_ERR(clk);
+ }
+ } else {
+ clk = NULL;
+ }
+
+ venc.tv_dac_clk = clk;
+
+ return 0;
+}
+
+static void venc_put_clocks(void)
+{
+ if (venc.tv_clk)
+ clk_put(venc.tv_clk);
+ if (venc.tv_dac_clk)
+ clk_put(venc.tv_dac_clk);
+}
+
/* VENC HW IP initialisation */
static int omap_venchw_probe(struct platform_device *pdev)
{
u8 rev_id;
struct resource *venc_mem;
+ int r;
venc.pdev = pdev;
@@ -737,22 +783,40 @@ static int omap_venchw_probe(struct platform_device *pdev)
venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
if (!venc_mem) {
DSSERR("can't get IORESOURCE_MEM VENC\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto err_ioremap;
}
venc.base = ioremap(venc_mem->start, resource_size(venc_mem));
if (!venc.base) {
DSSERR("can't ioremap VENC\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto err_ioremap;
}
- venc_enable_clocks(1);
+ r = venc_get_clocks(pdev);
+ if (r)
+ goto err_get_clk;
+
+ pm_runtime_enable(&pdev->dev);
+
+ r = venc_runtime_get();
+ if (r)
+ goto err_get_venc;
rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
- venc_enable_clocks(0);
+ venc_runtime_put();
return omap_dss_register_driver(&venc_driver);
+
+err_get_venc:
+ pm_runtime_disable(&pdev->dev);
+ venc_put_clocks();
+err_get_clk:
+ iounmap(venc.base);
+err_ioremap:
+ return r;
}
static int omap_venchw_remove(struct platform_device *pdev)
@@ -763,16 +827,61 @@ static int omap_venchw_remove(struct platform_device *pdev)
}
omap_dss_unregister_driver(&venc_driver);
+ pm_runtime_disable(&pdev->dev);
+ venc_put_clocks();
+
iounmap(venc.base);
return 0;
}
+static int venc_runtime_suspend(struct device *dev)
+{
+ if (venc.tv_dac_clk)
+ clk_disable(venc.tv_dac_clk);
+ clk_disable(venc.tv_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int venc_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ goto err_get_dispc;
+
+ clk_enable(venc.tv_clk);
+ if (venc.tv_dac_clk)
+ clk_enable(venc.tv_dac_clk);
+
+ return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static const struct dev_pm_ops venc_pm_ops = {
+ .runtime_suspend = venc_runtime_suspend,
+ .runtime_resume = venc_runtime_resume,
+};
+
static struct platform_driver omap_venchw_driver = {
.probe = omap_venchw_probe,
.remove = omap_venchw_remove,
.driver = {
.name = "omapdss_venc",
.owner = THIS_MODULE,
+ .pm = &venc_pm_ops,
},
};
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index cff450392b7..6b1ac23dbbd 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -316,67 +316,67 @@ int omapfb_update_window(struct fb_info *fbi,
}
EXPORT_SYMBOL(omapfb_update_window);
-static int omapfb_set_update_mode(struct fb_info *fbi,
+int omapfb_set_update_mode(struct fb_info *fbi,
enum omapfb_update_mode mode)
{
struct omap_dss_device *display = fb2display(fbi);
- enum omap_dss_update_mode um;
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb_display_data *d;
int r;
- if (!display || !display->driver->set_update_mode)
+ if (!display)
return -EINVAL;
- switch (mode) {
- case OMAPFB_UPDATE_DISABLED:
- um = OMAP_DSS_UPDATE_DISABLED;
- break;
+ if (mode != OMAPFB_AUTO_UPDATE && mode != OMAPFB_MANUAL_UPDATE)
+ return -EINVAL;
- case OMAPFB_AUTO_UPDATE:
- um = OMAP_DSS_UPDATE_AUTO;
- break;
+ omapfb_lock(fbdev);
- case OMAPFB_MANUAL_UPDATE:
- um = OMAP_DSS_UPDATE_MANUAL;
- break;
+ d = get_display_data(fbdev, display);
- default:
- return -EINVAL;
+ if (d->update_mode == mode) {
+ omapfb_unlock(fbdev);
+ return 0;
}
- r = display->driver->set_update_mode(display, um);
+ r = 0;
+
+ if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+ if (mode == OMAPFB_AUTO_UPDATE)
+ omapfb_start_auto_update(fbdev, display);
+ else /* MANUAL_UPDATE */
+ omapfb_stop_auto_update(fbdev, display);
+
+ d->update_mode = mode;
+ } else { /* AUTO_UPDATE */
+ if (mode == OMAPFB_MANUAL_UPDATE)
+ r = -EINVAL;
+ }
+
+ omapfb_unlock(fbdev);
return r;
}
-static int omapfb_get_update_mode(struct fb_info *fbi,
+int omapfb_get_update_mode(struct fb_info *fbi,
enum omapfb_update_mode *mode)
{
struct omap_dss_device *display = fb2display(fbi);
- enum omap_dss_update_mode m;
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb_display_data *d;
if (!display)
return -EINVAL;
- if (!display->driver->get_update_mode) {
- *mode = OMAPFB_AUTO_UPDATE;
- return 0;
- }
+ omapfb_lock(fbdev);
- m = display->driver->get_update_mode(display);
+ d = get_display_data(fbdev, display);
- switch (m) {
- case OMAP_DSS_UPDATE_DISABLED:
- *mode = OMAPFB_UPDATE_DISABLED;
- break;
- case OMAP_DSS_UPDATE_AUTO:
- *mode = OMAPFB_AUTO_UPDATE;
- break;
- case OMAP_DSS_UPDATE_MANUAL:
- *mode = OMAPFB_MANUAL_UPDATE;
- break;
- default:
- BUG();
- }
+ *mode = d->update_mode;
+
+ omapfb_unlock(fbdev);
return 0;
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 505bc12a303..602b71a92d3 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -46,6 +46,10 @@ static char *def_vram;
static int def_vrfb;
static int def_rotate;
static int def_mirror;
+static bool auto_update;
+static unsigned int auto_update_freq;
+module_param(auto_update, bool, 0);
+module_param(auto_update_freq, uint, 0644);
#ifdef DEBUG
unsigned int omapfb_debug;
@@ -1242,6 +1246,7 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omap_dss_device *display = fb2display(fbi);
+ struct omapfb_display_data *d;
int r = 0;
if (!display)
@@ -1249,6 +1254,8 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
omapfb_lock(fbdev);
+ d = get_display_data(fbdev, display);
+
switch (blank) {
case FB_BLANK_UNBLANK:
if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
@@ -1257,6 +1264,11 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
if (display->driver->resume)
r = display->driver->resume(display);
+ if ((display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) &&
+ d->update_mode == OMAPFB_AUTO_UPDATE &&
+ !d->auto_update_work_enabled)
+ omapfb_start_auto_update(fbdev, display);
+
break;
case FB_BLANK_NORMAL:
@@ -1268,6 +1280,9 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
goto exit;
+ if (d->auto_update_work_enabled)
+ omapfb_stop_auto_update(fbdev, display);
+
if (display->driver->suspend)
r = display->driver->suspend(display);
@@ -1724,6 +1739,78 @@ err:
return r;
}
+static void omapfb_auto_update_work(struct work_struct *work)
+{
+ struct omap_dss_device *dssdev;
+ struct omap_dss_driver *dssdrv;
+ struct omapfb_display_data *d;
+ u16 w, h;
+ unsigned int freq;
+ struct omapfb2_device *fbdev;
+
+ d = container_of(work, struct omapfb_display_data,
+ auto_update_work.work);
+
+ dssdev = d->dssdev;
+ dssdrv = dssdev->driver;
+ fbdev = d->fbdev;
+
+ if (!dssdrv || !dssdrv->update)
+ return;
+
+ if (dssdrv->sync)
+ dssdrv->sync(dssdev);
+
+ dssdrv->get_resolution(dssdev, &w, &h);
+ dssdrv->update(dssdev, 0, 0, w, h);
+
+ freq = auto_update_freq;
+ if (freq == 0)
+ freq = 20;
+ queue_delayed_work(fbdev->auto_update_wq,
+ &d->auto_update_work, HZ / freq);
+}
+
+void omapfb_start_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display)
+{
+ struct omapfb_display_data *d;
+
+ if (fbdev->auto_update_wq == NULL) {
+ struct workqueue_struct *wq;
+
+ wq = create_singlethread_workqueue("omapfb_auto_update");
+
+ if (wq == NULL) {
+ dev_err(fbdev->dev, "Failed to create workqueue for "
+ "auto-update\n");
+ return;
+ }
+
+ fbdev->auto_update_wq = wq;
+ }
+
+ d = get_display_data(fbdev, display);
+
+ INIT_DELAYED_WORK(&d->auto_update_work, omapfb_auto_update_work);
+
+ d->auto_update_work_enabled = true;
+
+ omapfb_auto_update_work(&d->auto_update_work.work);
+}
+
+void omapfb_stop_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display)
+{
+ struct omapfb_display_data *d;
+
+ d = get_display_data(fbdev, display);
+
+ cancel_delayed_work_sync(&d->auto_update_work);
+
+ d->auto_update_work_enabled = false;
+}
+
/* initialize fb_info, var, fix to something sane based on the display */
static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
{
@@ -1858,10 +1945,21 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
}
for (i = 0; i < fbdev->num_displays; i++) {
- if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED)
- fbdev->displays[i]->driver->disable(fbdev->displays[i]);
+ struct omap_dss_device *dssdev = fbdev->displays[i].dssdev;
+
+ if (fbdev->displays[i].auto_update_work_enabled)
+ omapfb_stop_auto_update(fbdev, dssdev);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
+ dssdev->driver->disable(dssdev);
+
+ omap_dss_put_device(dssdev);
+ }
- omap_dss_put_device(fbdev->displays[i]);
+ if (fbdev->auto_update_wq != NULL) {
+ flush_workqueue(fbdev->auto_update_wq);
+ destroy_workqueue(fbdev->auto_update_wq);
+ fbdev->auto_update_wq = NULL;
}
dev_set_drvdata(fbdev->dev, NULL);
@@ -2084,14 +2182,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
int r;
u8 bpp;
struct omap_video_timings timings, temp_timings;
+ struct omapfb_display_data *d;
r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
if (r)
return r;
- fbdev->bpp_overrides[fbdev->num_bpp_overrides].dssdev = display;
- fbdev->bpp_overrides[fbdev->num_bpp_overrides].bpp = bpp;
- ++fbdev->num_bpp_overrides;
+ d = get_display_data(fbdev, display);
+ d->bpp_override = bpp;
if (display->driver->check_timings) {
r = display->driver->check_timings(display, &timings);
@@ -2117,14 +2215,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
static int omapfb_get_recommended_bpp(struct omapfb2_device *fbdev,
struct omap_dss_device *dssdev)
{
- int i;
+ struct omapfb_display_data *d;
BUG_ON(dssdev->driver->get_recommended_bpp == NULL);
- for (i = 0; i < fbdev->num_bpp_overrides; ++i) {
- if (dssdev == fbdev->bpp_overrides[i].dssdev)
- return fbdev->bpp_overrides[i].bpp;
- }
+ d = get_display_data(fbdev, dssdev);
+
+ if (d->bpp_override != 0)
+ return d->bpp_override;
return dssdev->driver->get_recommended_bpp(dssdev);
}
@@ -2156,9 +2254,9 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
display = NULL;
for (i = 0; i < fbdev->num_displays; ++i) {
- if (strcmp(fbdev->displays[i]->name,
+ if (strcmp(fbdev->displays[i].dssdev->name,
display_str) == 0) {
- display = fbdev->displays[i];
+ display = fbdev->displays[i].dssdev;
break;
}
}
@@ -2182,6 +2280,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
struct omap_dss_device *dssdev)
{
struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omapfb_display_data *d;
int r;
r = dssdrv->enable(dssdev);
@@ -2191,8 +2290,20 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
return r;
}
+ d = get_display_data(fbdev, dssdev);
+
+ d->fbdev = fbdev;
+
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
u16 w, h;
+
+ if (auto_update) {
+ omapfb_start_auto_update(fbdev, dssdev);
+ d->update_mode = OMAPFB_AUTO_UPDATE;
+ } else {
+ d->update_mode = OMAPFB_MANUAL_UPDATE;
+ }
+
if (dssdrv->enable_te) {
r = dssdrv->enable_te(dssdev, 1);
if (r) {
@@ -2201,16 +2312,6 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
}
}
- if (dssdrv->set_update_mode) {
- r = dssdrv->set_update_mode(dssdev,
- OMAP_DSS_UPDATE_MANUAL);
- if (r) {
- dev_err(fbdev->dev,
- "Failed to set update mode\n");
- return r;
- }
- }
-
dssdrv->get_resolution(dssdev, &w, &h);
r = dssdrv->update(dssdev, 0, 0, w, h);
if (r) {
@@ -2219,15 +2320,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
return r;
}
} else {
- if (dssdrv->set_update_mode) {
- r = dssdrv->set_update_mode(dssdev,
- OMAP_DSS_UPDATE_AUTO);
- if (r) {
- dev_err(fbdev->dev,
- "Failed to set update mode\n");
- return r;
- }
- }
+ d->update_mode = OMAPFB_AUTO_UPDATE;
}
return 0;
@@ -2275,6 +2368,8 @@ static int omapfb_probe(struct platform_device *pdev)
fbdev->num_displays = 0;
dssdev = NULL;
for_each_dss_dev(dssdev) {
+ struct omapfb_display_data *d;
+
omap_dss_get_device(dssdev);
if (!dssdev->driver) {
@@ -2282,7 +2377,12 @@ static int omapfb_probe(struct platform_device *pdev)
r = -ENODEV;
}
- fbdev->displays[fbdev->num_displays++] = dssdev;
+ d = &fbdev->displays[fbdev->num_displays++];
+ d->dssdev = dssdev;
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+ d->update_mode = OMAPFB_MANUAL_UPDATE;
+ else
+ d->update_mode = OMAPFB_AUTO_UPDATE;
}
if (r)
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 2f5e817b2a9..153bf1aceeb 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -518,6 +518,39 @@ static ssize_t show_virt(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr);
}
+static ssize_t show_upd_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ enum omapfb_update_mode mode;
+ int r;
+
+ r = omapfb_get_update_mode(fbi, &mode);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode);
+}
+
+static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ unsigned mode;
+ int r;
+
+ r = kstrtouint(buf, 0, &mode);
+ if (r)
+ return r;
+
+ r = omapfb_set_update_mode(fbi, mode);
+ if (r)
+ return r;
+
+ return count;
+}
+
static struct device_attribute omapfb_attrs[] = {
__ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type,
store_rotate_type),
@@ -528,6 +561,7 @@ static struct device_attribute omapfb_attrs[] = {
store_overlays_rotate),
__ATTR(phys_addr, S_IRUGO, show_phys, NULL),
__ATTR(virt_addr, S_IRUGO, show_virt, NULL),
+ __ATTR(update_mode, S_IRUGO | S_IWUSR, show_upd_mode, store_upd_mode),
};
int omapfb_create_sysfs(struct omapfb2_device *fbdev)
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index aa1b1d97427..fdf0edeccf4 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -73,6 +73,15 @@ struct omapfb_info {
bool mirror;
};
+struct omapfb_display_data {
+ struct omapfb2_device *fbdev;
+ struct omap_dss_device *dssdev;
+ u8 bpp_override;
+ enum omapfb_update_mode update_mode;
+ bool auto_update_work_enabled;
+ struct delayed_work auto_update_work;
+};
+
struct omapfb2_device {
struct device *dev;
struct mutex mtx;
@@ -86,17 +95,13 @@ struct omapfb2_device {
struct omapfb2_mem_region regions[10];
unsigned num_displays;
- struct omap_dss_device *displays[10];
+ struct omapfb_display_data displays[10];
unsigned num_overlays;
struct omap_overlay *overlays[10];
unsigned num_managers;
struct omap_overlay_manager *managers[10];
- unsigned num_bpp_overrides;
- struct {
- struct omap_dss_device *dssdev;
- u8 bpp;
- } bpp_overrides[10];
+ struct workqueue_struct *auto_update_wq;
};
struct omapfb_colormode {
@@ -128,6 +133,13 @@ int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
u16 posx, u16 posy, u16 outw, u16 outh);
+void omapfb_start_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display);
+void omapfb_stop_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display);
+int omapfb_get_update_mode(struct fb_info *fbi, enum omapfb_update_mode *mode);
+int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode);
+
/* find the display connected to this fb, if any */
static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
{
@@ -143,6 +155,19 @@ static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
return NULL;
}
+static inline struct omapfb_display_data *get_display_data(
+ struct omapfb2_device *fbdev, struct omap_dss_device *dssdev)
+{
+ int i;
+
+ for (i = 0; i < fbdev->num_displays; ++i)
+ if (fbdev->displays[i].dssdev == dssdev)
+ return &fbdev->displays[i];
+
+ /* This should never happen */
+ BUG();
+}
+
static inline void omapfb_lock(struct omapfb2_device *fbdev)
{
mutex_lock(&fbdev->mtx);
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index 32549d177b1..dcaab9012ca 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -55,7 +55,7 @@
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
-#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) || (chip<=S3_PROSAVAGEDDR))
+#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) && (chip<=S3_PROSAVAGEDDR))
#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index f441726ddf2..86b0735e6aa 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -36,9 +36,6 @@ config WATCHDOG_CORE
and gives them the /dev/watchdog interface (and later also the
sysfs interface).
- To compile this driver as a module, choose M here: the module will
- be called watchdog.
-
config WATCHDOG_NOWAYOUT
bool "Disable watchdog shutdown on close"
help
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index afa78a54711..809f41c30c4 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -458,7 +458,15 @@ static int __devexit nv_tco_remove(struct platform_device *dev)
static void nv_tco_shutdown(struct platform_device *dev)
{
+ u32 val;
+
tco_timer_stop();
+
+ /* Some BIOSes fail the POST (once) if the NO_REBOOT flag is not
+ * unset during shutdown. */
+ pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+ val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+ pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
}
static struct platform_driver nv_tco_driver = {
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index db84f2322d1..a267dc078da 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -64,7 +64,7 @@
* misses its deadline, the kernel timer will allow the WDT to overflow.
*/
static int clock_division_ratio = WTCSR_CKS_4096;
-#define next_ping_period(cks) msecs_to_jiffies(cks - 4)
+#define next_ping_period(cks) (jiffies + msecs_to_jiffies(cks - 4))
static const struct watchdog_info sh_wdt_info;
static struct platform_device *sh_wdt_dev;
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index f815283667a..5f7ff8e2fc1 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -11,7 +11,7 @@ config XEN_BALLOON
config XEN_SELFBALLOONING
bool "Dynamically self-balloon kernel memory to target"
- depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP
+ depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP && XEN_TMEM
default n
help
Self-ballooning dynamically balloons available kernel memory driven
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index e9cb57f0754..9a1d4263075 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -182,11 +182,11 @@ int v9fs_set_create_acl(struct dentry *dentry,
return 0;
}
-int v9fs_acl_mode(struct inode *dir, mode_t *modep,
+int v9fs_acl_mode(struct inode *dir, umode_t *modep,
struct posix_acl **dpacl, struct posix_acl **pacl)
{
int retval = 0;
- mode_t mode = *modep;
+ umode_t mode = *modep;
struct posix_acl *acl = NULL;
if (!S_ISLNK(mode)) {
@@ -319,7 +319,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
case ACL_TYPE_ACCESS:
name = POSIX_ACL_XATTR_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
retval = posix_acl_equiv_mode(acl, &mode);
if (retval < 0)
goto err_out;
diff --git a/fs/9p/acl.h b/fs/9p/acl.h
index ddb7ae19d97..55955641196 100644
--- a/fs/9p/acl.h
+++ b/fs/9p/acl.h
@@ -20,7 +20,7 @@ extern struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type);
extern int v9fs_acl_chmod(struct dentry *);
extern int v9fs_set_create_acl(struct dentry *,
struct posix_acl **, struct posix_acl **);
-extern int v9fs_acl_mode(struct inode *dir, mode_t *modep,
+extern int v9fs_acl_mode(struct inode *dir, umode_t *modep,
struct posix_acl **dpacl, struct posix_acl **pacl);
#else
#define v9fs_iop_get_acl NULL
@@ -38,7 +38,7 @@ static inline int v9fs_set_create_acl(struct dentry *dentry,
{
return 0;
}
-static inline int v9fs_acl_mode(struct inode *dir, mode_t *modep,
+static inline int v9fs_acl_mode(struct inode *dir, umode_t *modep,
struct posix_acl **dpacl,
struct posix_acl **pacl)
{
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 9a26dce5a99..b6c8ed20519 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -206,7 +206,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
int err = 0;
gid_t gid;
int flags;
- mode_t mode;
+ umode_t mode;
char *name = NULL;
struct file *filp;
struct p9_qid qid;
@@ -348,7 +348,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
struct p9_fid *fid = NULL, *dfid = NULL;
gid_t gid;
char *name;
- mode_t mode;
+ umode_t mode;
struct inode *inode;
struct p9_qid qid;
struct dentry *dir_dentry;
@@ -751,7 +751,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
int err;
gid_t gid;
char *name;
- mode_t mode;
+ umode_t mode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
diff --git a/fs/Kconfig b/fs/Kconfig
index 19891aab9c6..9fe0b349f4c 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -127,14 +127,21 @@ config TMPFS_POSIX_ACL
select TMPFS_XATTR
select GENERIC_ACL
help
- POSIX Access Control Lists (ACLs) support permissions for users and
- groups beyond the owner/group/world scheme.
+ POSIX Access Control Lists (ACLs) support additional access rights
+ for users and groups beyond the standard owner/group/world scheme,
+ and this option selects support for ACLs specifically for tmpfs
+ filesystems.
+
+ If you've selected TMPFS, it's possible that you'll also need
+ this option as there are a number of Linux distros that require
+ POSIX ACL support under /dev for certain features to work properly.
+ For example, some distros need this feature for ALSA-related /dev
+ files for sound to work properly. In short, if you're not sure,
+ say Y.
To learn more about Access Control Lists, visit the POSIX ACLs for
Linux website <http://acl.bestbits.at/>.
- If you don't know what Access Control Lists are, say N.
-
config TMPFS_XATTR
bool "Tmpfs extended attributes"
depends on TMPFS
diff --git a/fs/block_dev.c b/fs/block_dev.c
index f55aad4d161..ff77262e887 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -387,6 +387,10 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
struct inode *bd_inode = filp->f_mapping->host;
struct block_device *bdev = I_BDEV(bd_inode);
int error;
+
+ error = filemap_write_and_wait_range(filp->f_mapping, start, end);
+ if (error)
+ return error;
/*
* There is no need to serialise calls to blkdev_issue_flush with
@@ -552,6 +556,7 @@ struct block_device *bdget(dev_t dev)
if (inode->i_state & I_NEW) {
bdev->bd_contains = NULL;
+ bdev->bd_super = NULL;
bdev->bd_inode = inode;
bdev->bd_block_size = (1 << inode->i_blkbits);
bdev->bd_part_count = 0;
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 9b72dcf1cd2..40e6ac08c21 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -6,5 +6,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
transaction.o inode.o file.o tree-defrag.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
- export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \
+ export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
+
+btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 65a735d8f6e..eb159aaa5a1 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -28,8 +28,6 @@
#include "btrfs_inode.h"
#include "xattr.h"
-#ifdef CONFIG_BTRFS_FS_POSIX_ACL
-
struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
{
int size;
@@ -111,7 +109,6 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
int ret, size = 0;
const char *name;
char *value = NULL;
- mode_t mode;
if (acl) {
ret = posix_acl_valid(acl);
@@ -122,13 +119,11 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
switch (type) {
case ACL_TYPE_ACCESS:
- mode = inode->i_mode;
name = POSIX_ACL_XATTR_ACCESS;
if (acl) {
- ret = posix_acl_equiv_mode(acl, &mode);
+ ret = posix_acl_equiv_mode(acl, &inode->i_mode);
if (ret < 0)
return ret;
- inode->i_mode = mode;
}
ret = 0;
break;
@@ -222,19 +217,16 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
}
if (IS_POSIXACL(dir) && acl) {
- mode_t mode = inode->i_mode;
-
if (S_ISDIR(inode->i_mode)) {
ret = btrfs_set_acl(trans, inode, acl,
ACL_TYPE_DEFAULT);
if (ret)
goto failed;
}
- ret = posix_acl_create(&acl, GFP_NOFS, &mode);
+ ret = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
if (ret < 0)
return ret;
- inode->i_mode = mode;
if (ret > 0) {
/* we need an acl */
ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
@@ -282,18 +274,3 @@ const struct xattr_handler btrfs_xattr_acl_access_handler = {
.get = btrfs_xattr_acl_get,
.set = btrfs_xattr_acl_set,
};
-
-#else /* CONFIG_BTRFS_FS_POSIX_ACL */
-
-int btrfs_acl_chmod(struct inode *inode)
-{
- return 0;
-}
-
-int btrfs_init_acl(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir)
-{
- return 0;
-}
-
-#endif /* CONFIG_BTRFS_FS_POSIX_ACL */
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index bfe42b03eaf..8ec5d86f173 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -338,6 +338,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
u64 first_byte = disk_start;
struct block_device *bdev;
int ret;
+ int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
@@ -392,8 +393,11 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret);
- ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
- BUG_ON(ret);
+ if (!skip_sum) {
+ ret = btrfs_csum_one_bio(root, inode, bio,
+ start, 1);
+ BUG_ON(ret);
+ }
ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
BUG_ON(ret);
@@ -418,8 +422,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret);
- ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
- BUG_ON(ret);
+ if (!skip_sum) {
+ ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
+ BUG_ON(ret);
+ }
ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
BUG_ON(ret);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 365c4e1dde0..0469263e327 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2406,8 +2406,8 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
-int btrfs_set_root_node(struct btrfs_root_item *item,
- struct extent_buffer *node);
+void btrfs_set_root_node(struct btrfs_root_item *item,
+ struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
/* dir-item.c */
@@ -2523,6 +2523,14 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
#define PageChecked PageFsMisc
#endif
+/* This forces readahead on a given range of bytes in an inode */
+static inline void btrfs_force_ra(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *file,
+ pgoff_t offset, unsigned long req_size)
+{
+ page_cache_sync_readahead(mapping, ra, file, offset, req_size);
+}
+
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
@@ -2551,9 +2559,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio, unsigned long bio_flags);
-unsigned long btrfs_force_ra(struct address_space *mapping,
- struct file_ra_state *ra, struct file *file,
- pgoff_t offset, pgoff_t last_index);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
@@ -2648,12 +2653,21 @@ do { \
/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
-#else
-#define btrfs_get_acl NULL
-#endif
int btrfs_init_acl(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir);
int btrfs_acl_chmod(struct inode *inode);
+#else
+#define btrfs_get_acl NULL
+static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir)
+{
+ return 0;
+}
+static inline int btrfs_acl_chmod(struct inode *inode)
+{
+ return 0;
+}
+#endif
/* relocation.c */
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
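
The Makefile, acl.c and ctree.h hunks above rework the CONFIG_BTRFS_FS_POSIX_ACL=n case: acl.o is only compiled when the option is enabled, and the header supplies static inline no-op stubs so callers build unchanged either way. As a rough, stand-alone sketch of that pattern (CONFIG_FEATURE_ACL and feature_acl_chmod are invented names, not btrfs identifiers):

#include <stdio.h>

#ifdef CONFIG_FEATURE_ACL
/* stands in for the real implementation compiled from acl.o */
static int feature_acl_chmod(int mode)
{
        printf("updating ACL to match mode %o\n", mode);
        return 0;
}
#else
/* header stub: call sites need no #ifdefs and the call compiles away */
static inline int feature_acl_chmod(int mode)
{
        (void)mode;
        return 0;
}
#endif

int main(void)
{
        return feature_acl_chmod(0644); /* build with or without -DCONFIG_FEATURE_ACL */
}

Either configuration keeps the call sites identical; the stub should collapse to nothing once the compiler inlines it.
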
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index c360a848d97..31d84e78129 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -198,8 +198,6 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0;
- struct btrfs_key found_key;
- struct extent_buffer *leaf;
key.objectid = dir;
btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
@@ -209,18 +207,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0)
return ERR_PTR(ret);
- if (ret > 0) {
- if (path->slots[0] == 0)
- return NULL;
- path->slots[0]--;
- }
-
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
- if (found_key.objectid != dir ||
- btrfs_key_type(&found_key) != BTRFS_DIR_ITEM_KEY ||
- found_key.offset != key.offset)
+ if (ret > 0)
return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len);
@@ -315,8 +302,6 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0;
- struct btrfs_key found_key;
- struct extent_buffer *leaf;
key.objectid = dir;
btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
@@ -324,18 +309,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0)
return ERR_PTR(ret);
- if (ret > 0) {
- if (path->slots[0] == 0)
- return NULL;
- path->slots[0]--;
- }
-
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
- if (found_key.objectid != dir ||
- btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY ||
- found_key.offset != key.offset)
+ if (ret > 0)
return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len);
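
The two dir-item.c hunks drop the old "step back one slot and re-check the key" fallback: a positive return from btrfs_search_slot() already means the exact key is not present, which is all these lookups need to know. A toy analogue of that exact-match convention over a sorted array (search_slot() below is illustrative, not the btrfs function):

#include <stdio.h>

/* 0 and *slot set on an exact hit; 1 and the insert position otherwise */
static int search_slot(const int *keys, int n, int key, int *slot)
{
        for (int i = 0; i < n; i++) {
                if (keys[i] == key) {
                        *slot = i;
                        return 0;
                }
                if (keys[i] > key) {
                        *slot = i;
                        return 1;
                }
        }
        *slot = n;
        return 1;
}

int main(void)
{
        const int keys[] = { 10, 20, 30 };
        int slot;
        int ret = search_slot(keys, 3, 25, &slot);

        /* ret > 0: simply report "no entry" instead of inspecting slot - 1 */
        printf("ret=%d slot=%d -> %s\n", ret, slot, ret ? "no entry" : "found");
        return 0;
}
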
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4d08ed79405..66bac226944 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -663,7 +663,9 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
struct btrfs_path *path;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
key.objectid = start;
key.offset = len;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
@@ -3272,6 +3274,9 @@ again:
}
ret = btrfs_alloc_chunk(trans, extent_root, flags);
+ if (ret < 0 && ret != -ENOSPC)
+ goto out;
+
spin_lock(&space_info->lock);
if (ret)
space_info->full = 1;
@@ -3281,6 +3286,7 @@ again:
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
+out:
mutex_unlock(&extent_root->fs_info->chunk_mutex);
return ret;
}
@@ -4456,7 +4462,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
printk(KERN_ERR "umm, got %d back from search"
", was looking for %llu\n", ret,
(unsigned long long)bytenr);
- btrfs_print_leaf(extent_root, path->nodes[0]);
+ if (ret > 0)
+ btrfs_print_leaf(extent_root,
+ path->nodes[0]);
}
BUG_ON(ret);
extent_slot = path->slots[0];
@@ -5073,7 +5081,9 @@ have_block_group:
* group is does point to and try again
*/
if (!last_ptr_loop && last_ptr->block_group &&
- last_ptr->block_group != block_group) {
+ last_ptr->block_group != block_group &&
+ index <=
+ get_block_group_index(last_ptr->block_group)) {
btrfs_put_block_group(block_group);
block_group = last_ptr->block_group;
@@ -5501,7 +5511,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
@@ -6272,10 +6283,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
int level;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
wc = kzalloc(sizeof(*wc), GFP_NOFS);
- BUG_ON(!wc);
+ if (!wc) {
+ btrfs_free_path(path);
+ return -ENOMEM;
+ }
trans = btrfs_start_transaction(tree_root, 0);
BUG_ON(IS_ERR(trans));
@@ -6538,8 +6553,6 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
u64 min_allocable_bytes;
int ret = -ENOSPC;
- if (cache->ro)
- return 0;
/*
* We need some metadata space and system metadata space for
@@ -6555,6 +6568,12 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
spin_lock(&sinfo->lock);
spin_lock(&cache->lock);
+
+ if (cache->ro) {
+ ret = 0;
+ goto out;
+ }
+
num_bytes = cache->key.offset - cache->reserved - cache->pinned -
cache->bytes_super - btrfs_block_group_used(&cache->item);
@@ -6568,7 +6587,7 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
cache->ro = 1;
ret = 0;
}
-
+out:
spin_unlock(&cache->lock);
spin_unlock(&sinfo->lock);
return ret;
@@ -7183,11 +7202,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cluster->refill_lock);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
inode = lookup_free_space_inode(root, block_group, path);
if (!IS_ERR(inode)) {
- btrfs_orphan_add(trans, inode);
+ ret = btrfs_orphan_add(trans, inode);
+ BUG_ON(ret);
clear_nlink(inode);
/* One for the block groups ref */
spin_lock(&block_group->lock);
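
Several extent-tree.c hunks above swap BUG_ON(!path) for a returned -ENOMEM, and btrfs_drop_snapshot() additionally frees the path it already allocated when the second allocation fails. A minimal userspace sketch of that propagate-and-unwind pattern (the struct names are placeholders):

#include <errno.h>
#include <stdlib.h>

struct path_buf { int reserved; };
struct walk_control { int reserved; };

static int drop_snapshot_like(void)
{
        struct path_buf *path = malloc(sizeof(*path));
        struct walk_control *wc;

        if (!path)
                return -ENOMEM;                 /* was: BUG_ON(!path) */

        wc = calloc(1, sizeof(*wc));
        if (!wc) {
                free(path);                     /* release what we already hold */
                return -ENOMEM;
        }

        /* ... the actual tree walk is elided ... */

        free(wc);
        free(path);
        return 0;
}

int main(void)
{
        return drop_snapshot_like() ? 1 : 0;
}
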
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 067b1747421..d418164a35f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -254,14 +254,14 @@ static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
*
* This should be called with the tree lock held.
*/
-static int merge_state(struct extent_io_tree *tree,
- struct extent_state *state)
+static void merge_state(struct extent_io_tree *tree,
+ struct extent_state *state)
{
struct extent_state *other;
struct rb_node *other_node;
if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
- return 0;
+ return;
other_node = rb_prev(&state->rb_node);
if (other_node) {
@@ -287,19 +287,13 @@ static int merge_state(struct extent_io_tree *tree,
free_extent_state(other);
}
}
-
- return 0;
}
-static int set_state_cb(struct extent_io_tree *tree,
+static void set_state_cb(struct extent_io_tree *tree,
struct extent_state *state, int *bits)
{
- if (tree->ops && tree->ops->set_bit_hook) {
- return tree->ops->set_bit_hook(tree->mapping->host,
- state, bits);
- }
-
- return 0;
+ if (tree->ops && tree->ops->set_bit_hook)
+ tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}
static void clear_state_cb(struct extent_io_tree *tree,
@@ -309,6 +303,9 @@ static void clear_state_cb(struct extent_io_tree *tree,
tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}
+static void set_state_bits(struct extent_io_tree *tree,
+ struct extent_state *state, int *bits);
+
/*
* insert an extent_state struct into the tree. 'bits' are set on the
* struct before it is inserted.
@@ -324,8 +321,6 @@ static int insert_state(struct extent_io_tree *tree,
int *bits)
{
struct rb_node *node;
- int bits_to_set = *bits & ~EXTENT_CTLBITS;
- int ret;
if (end < start) {
printk(KERN_ERR "btrfs end < start %llu %llu\n",
@@ -335,13 +330,9 @@ static int insert_state(struct extent_io_tree *tree,
}
state->start = start;
state->end = end;
- ret = set_state_cb(tree, state, bits);
- if (ret)
- return ret;
- if (bits_to_set & EXTENT_DIRTY)
- tree->dirty_bytes += end - start + 1;
- state->state |= bits_to_set;
+ set_state_bits(tree, state, bits);
+
node = tree_insert(&tree->state, end, &state->rb_node);
if (node) {
struct extent_state *found;
@@ -357,13 +348,11 @@ static int insert_state(struct extent_io_tree *tree,
return 0;
}
-static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
+static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
u64 split)
{
if (tree->ops && tree->ops->split_extent_hook)
- return tree->ops->split_extent_hook(tree->mapping->host,
- orig, split);
- return 0;
+ tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}
/*
@@ -659,34 +648,25 @@ again:
if (start > end)
break;
- if (need_resched()) {
- spin_unlock(&tree->lock);
- cond_resched();
- spin_lock(&tree->lock);
- }
+ cond_resched_lock(&tree->lock);
}
out:
spin_unlock(&tree->lock);
return 0;
}
-static int set_state_bits(struct extent_io_tree *tree,
+static void set_state_bits(struct extent_io_tree *tree,
struct extent_state *state,
int *bits)
{
- int ret;
int bits_to_set = *bits & ~EXTENT_CTLBITS;
- ret = set_state_cb(tree, state, bits);
- if (ret)
- return ret;
+ set_state_cb(tree, state, bits);
if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
u64 range = state->end - state->start + 1;
tree->dirty_bytes += range;
}
state->state |= bits_to_set;
-
- return 0;
}
static void cache_state(struct extent_state *state,
@@ -779,9 +759,7 @@ hit_next:
goto out;
}
- err = set_state_bits(tree, state, &bits);
- if (err)
- goto out;
+ set_state_bits(tree, state, &bits);
cache_state(state, cached_state);
merge_state(tree, state);
@@ -830,9 +808,7 @@ hit_next:
if (err)
goto out;
if (state->end <= end) {
- err = set_state_bits(tree, state, &bits);
- if (err)
- goto out;
+ set_state_bits(tree, state, &bits);
cache_state(state, cached_state);
merge_state(tree, state);
if (last_end == (u64)-1)
@@ -893,11 +869,7 @@ hit_next:
err = split_state(tree, state, prealloc, end + 1);
BUG_ON(err == -EEXIST);
- err = set_state_bits(tree, prealloc, &bits);
- if (err) {
- prealloc = NULL;
- goto out;
- }
+ set_state_bits(tree, prealloc, &bits);
cache_state(prealloc, cached_state);
merge_state(tree, prealloc);
prealloc = NULL;
@@ -1059,46 +1031,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
return 0;
}
-/*
- * find the first offset in the io tree with 'bits' set. zero is
- * returned if we find something, and *start_ret and *end_ret are
- * set to reflect the state struct that was found.
- *
- * If nothing was found, 1 is returned, < 0 on error
- */
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, int bits)
-{
- struct rb_node *node;
- struct extent_state *state;
- int ret = 1;
-
- spin_lock(&tree->lock);
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search(tree, start);
- if (!node)
- goto out;
-
- while (1) {
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->end >= start && (state->state & bits)) {
- *start_ret = state->start;
- *end_ret = state->end;
- ret = 0;
- break;
- }
- node = rb_next(node);
- if (!node)
- break;
- }
-out:
- spin_unlock(&tree->lock);
- return ret;
-}
-
/* find the first state struct with 'bits' set after 'start', and
* return it. tree->lock must be held. NULL will returned if
* nothing was found after 'start'
@@ -1131,6 +1063,30 @@ out:
}
/*
+ * find the first offset in the io tree with 'bits' set. zero is
+ * returned if we find something, and *start_ret and *end_ret are
+ * set to reflect the state struct that was found.
+ *
+ * If nothing was found, 1 is returned, < 0 on error
+ */
+int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, int bits)
+{
+ struct extent_state *state;
+ int ret = 1;
+
+ spin_lock(&tree->lock);
+ state = find_first_extent_bit_state(tree, start, bits);
+ if (state) {
+ *start_ret = state->start;
+ *end_ret = state->end;
+ ret = 0;
+ }
+ spin_unlock(&tree->lock);
+ return ret;
+}
+
+/*
* find a contiguous range of bytes in the file marked as delalloc, not
* more than 'max_bytes'. start and end are used to return the range,
*
@@ -2546,7 +2502,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
struct writeback_control *wbc)
{
int ret;
- struct address_space *mapping = page->mapping;
struct extent_page_data epd = {
.bio = NULL,
.tree = tree,
@@ -2554,17 +2509,9 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
- struct writeback_control wbc_writepages = {
- .sync_mode = wbc->sync_mode,
- .nr_to_write = 64,
- .range_start = page_offset(page) + PAGE_CACHE_SIZE,
- .range_end = (loff_t)-1,
- };
ret = __extent_writepage(page, wbc, &epd);
- extent_write_cache_pages(tree, mapping, &wbc_writepages,
- __extent_writepage, &epd, flush_write_bio);
flush_epd_write_bio(&epd);
return ret;
}
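
With find_first_extent_bit_state() now declared first, find_first_extent_bit() above shrinks to a wrapper that takes the tree lock, calls the helper, and copies the range out. A userspace sketch of that lock-held-helper plus locking-wrapper split, with a pthread mutex and a flat array standing in for the spinlock and rb-tree (all names invented; build with -pthread):

#include <pthread.h>
#include <stddef.h>

struct state_range { unsigned long start, end, bits; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct state_range states[4];            /* stand-in for the rb-tree */
static size_t nr_states;

/* caller must hold tree_lock, like find_first_extent_bit_state() */
static struct state_range *first_state_with_bits(unsigned long start,
                                                 unsigned long bits)
{
        for (size_t i = 0; i < nr_states; i++)
                if (states[i].end >= start && (states[i].bits & bits))
                        return &states[i];
        return NULL;
}

/* public wrapper: take the lock, reuse the helper, copy the range out */
static int first_extent_bit(unsigned long start, unsigned long bits,
                            unsigned long *start_ret, unsigned long *end_ret)
{
        struct state_range *s;
        int ret = 1;

        pthread_mutex_lock(&tree_lock);
        s = first_state_with_bits(start, bits);
        if (s) {
                *start_ret = s->start;
                *end_ret = s->end;
                ret = 0;
        }
        pthread_mutex_unlock(&tree_lock);
        return ret;
}

int main(void)
{
        unsigned long s, e;

        states[nr_states++] = (struct state_range){ 4096, 8191, 0x1 };
        return first_extent_bit(0, 0x1, &s, &e);        /* 0: range found */
}
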
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 21a7ca9e728..7b2f0c3e792 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -76,15 +76,15 @@ struct extent_io_ops {
struct extent_state *state);
int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate);
- int (*set_bit_hook)(struct inode *inode, struct extent_state *state,
- int *bits);
- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
- int *bits);
- int (*merge_extent_hook)(struct inode *inode,
- struct extent_state *new,
- struct extent_state *other);
- int (*split_extent_hook)(struct inode *inode,
- struct extent_state *orig, u64 split);
+ void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
+ int *bits);
+ void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
+ int *bits);
+ void (*merge_extent_hook)(struct inode *inode,
+ struct extent_state *new,
+ struct extent_state *other);
+ void (*split_extent_hook)(struct inode *inode,
+ struct extent_state *orig, u64 split);
int (*write_cache_pages_lock_hook)(struct page *page);
};
@@ -108,8 +108,6 @@ struct extent_state {
wait_queue_head_t wq;
atomic_t refs;
unsigned long state;
- u64 split_start;
- u64 split_end;
/* for use by the FS */
u64 private;
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2d0410344ea..7c97b330145 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -183,22 +183,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
return 0;
}
-int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
- int ret = 0;
struct extent_map *merge = NULL;
struct rb_node *rb;
- struct extent_map *em;
-
- write_lock(&tree->lock);
- em = lookup_extent_mapping(tree, start, len);
-
- WARN_ON(!em || em->start != start);
-
- if (!em)
- goto out;
-
- clear_bit(EXTENT_FLAG_PINNED, &em->flags);
if (em->start != 0) {
rb = rb_prev(&em->rb_node);
@@ -225,6 +213,24 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
merge->in_tree = 0;
free_extent_map(merge);
}
+}
+
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+{
+ int ret = 0;
+ struct extent_map *em;
+
+ write_lock(&tree->lock);
+ em = lookup_extent_mapping(tree, start, len);
+
+ WARN_ON(!em || em->start != start);
+
+ if (!em)
+ goto out;
+
+ clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+ try_merge_map(tree, em);
free_extent_map(em);
out:
@@ -247,7 +253,6 @@ int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em)
{
int ret = 0;
- struct extent_map *merge = NULL;
struct rb_node *rb;
struct extent_map *exist;
@@ -263,30 +268,8 @@ int add_extent_mapping(struct extent_map_tree *tree,
goto out;
}
atomic_inc(&em->refs);
- if (em->start != 0) {
- rb = rb_prev(&em->rb_node);
- if (rb)
- merge = rb_entry(rb, struct extent_map, rb_node);
- if (rb && mergable_maps(merge, em)) {
- em->start = merge->start;
- em->len += merge->len;
- em->block_len += merge->block_len;
- em->block_start = merge->block_start;
- merge->in_tree = 0;
- rb_erase(&merge->rb_node, &tree->map);
- free_extent_map(merge);
- }
- }
- rb = rb_next(&em->rb_node);
- if (rb)
- merge = rb_entry(rb, struct extent_map, rb_node);
- if (rb && mergable_maps(em, merge)) {
- em->len += merge->len;
- em->block_len += merge->len;
- rb_erase(&merge->rb_node, &tree->map);
- merge->in_tree = 0;
- free_extent_map(merge);
- }
+
+ try_merge_map(tree, em);
out:
return ret;
}
@@ -299,19 +282,8 @@ static u64 range_end(u64 start, u64 len)
return start + len;
}
-/**
- * lookup_extent_mapping - lookup extent_map
- * @tree: tree to lookup in
- * @start: byte offset to start the search
- * @len: length of the lookup range
- *
- * Find and return the first extent_map struct in @tree that intersects the
- * [start, len] range. There may be additional objects in the tree that
- * intersect, so check the object returned carefully to make sure that no
- * additional lookups are needed.
- */
-struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len)
+struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len, int strict)
{
struct extent_map *em;
struct rb_node *rb_node;
@@ -320,38 +292,42 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 end = range_end(start, len);
rb_node = __tree_search(&tree->map, start, &prev, &next);
- if (!rb_node && prev) {
- em = rb_entry(prev, struct extent_map, rb_node);
- if (end > em->start && start < extent_map_end(em))
- goto found;
- }
- if (!rb_node && next) {
- em = rb_entry(next, struct extent_map, rb_node);
- if (end > em->start && start < extent_map_end(em))
- goto found;
- }
if (!rb_node) {
- em = NULL;
- goto out;
- }
- if (IS_ERR(rb_node)) {
- em = ERR_CAST(rb_node);
- goto out;
+ if (prev)
+ rb_node = prev;
+ else if (next)
+ rb_node = next;
+ else
+ return NULL;
}
+
em = rb_entry(rb_node, struct extent_map, rb_node);
- if (end > em->start && start < extent_map_end(em))
- goto found;
- em = NULL;
- goto out;
+ if (strict && !(end > em->start && start < extent_map_end(em)))
+ return NULL;
-found:
atomic_inc(&em->refs);
-out:
return em;
}
/**
+ * lookup_extent_mapping - lookup extent_map
+ * @tree: tree to lookup in
+ * @start: byte offset to start the search
+ * @len: length of the lookup range
+ *
+ * Find and return the first extent_map struct in @tree that intersects the
+ * [start, len] range. There may be additional objects in the tree that
+ * intersect, so check the object returned carefully to make sure that no
+ * additional lookups are needed.
+ */
+struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len)
+{
+ return __lookup_extent_mapping(tree, start, len, 1);
+}
+
+/**
* search_extent_mapping - find a nearby extent map
* @tree: tree to lookup in
* @start: byte offset to start the search
@@ -365,38 +341,7 @@ out:
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len)
{
- struct extent_map *em;
- struct rb_node *rb_node;
- struct rb_node *prev = NULL;
- struct rb_node *next = NULL;
-
- rb_node = __tree_search(&tree->map, start, &prev, &next);
- if (!rb_node && prev) {
- em = rb_entry(prev, struct extent_map, rb_node);
- goto found;
- }
- if (!rb_node && next) {
- em = rb_entry(next, struct extent_map, rb_node);
- goto found;
- }
- if (!rb_node) {
- em = NULL;
- goto out;
- }
- if (IS_ERR(rb_node)) {
- em = ERR_CAST(rb_node);
- goto out;
- }
- em = rb_entry(rb_node, struct extent_map, rb_node);
- goto found;
-
- em = NULL;
- goto out;
-
-found:
- atomic_inc(&em->refs);
-out:
- return em;
+ return __lookup_extent_mapping(tree, start, len, 0);
}
/**
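
__lookup_extent_mapping() above merges lookup_extent_mapping() and search_extent_mapping() into one walk controlled by a 'strict' flag: strict callers accept only a genuine intersection, relaxed callers will also take a neighbouring entry. A toy version of that shape, assuming a flat array rather than the rb-tree (names invented):

#include <stddef.h>
#include <stdio.h>

struct map_range { unsigned long start, len; };

static struct map_range maps[] = { { 0, 16 }, { 64, 16 } };

static struct map_range *lookup_mapping(unsigned long start, unsigned long len,
                                        int strict)
{
        struct map_range *nearby = NULL;

        for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++) {
                struct map_range *m = &maps[i];
                int intersects = start < m->start + m->len &&
                                 start + len > m->start;

                if (intersects)
                        return m;       /* both callers accept a real hit */
                if (!nearby)
                        nearby = m;     /* remember a neighbouring range */
        }
        return strict ? NULL : nearby;  /* strict: intersections only */
}

int main(void)
{
        printf("strict=%p relaxed=%p\n",
               (void *)lookup_mapping(32, 8, 1),
               (void *)lookup_mapping(32, 8, 0));
        return 0;
}
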
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 08bcfa92a22..b910694f61e 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -291,7 +291,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
if (search_commit) {
path->skip_locking = 1;
@@ -677,7 +678,9 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
btrfs_super_csum_size(&root->fs_info->super_copy);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
sector_sum = sums->sums;
again:
next_offset = (u64)-1;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a35e51c9f23..658d66959ab 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -74,7 +74,7 @@ struct inode_defrag {
* If an existing record is found the defrag item you
* pass in is freed
*/
-static int __btrfs_add_inode_defrag(struct inode *inode,
+static void __btrfs_add_inode_defrag(struct inode *inode,
struct inode_defrag *defrag)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -106,11 +106,11 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
BTRFS_I(inode)->in_defrag = 1;
rb_link_node(&defrag->rb_node, parent, p);
rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
- return 0;
+ return;
exists:
kfree(defrag);
- return 0;
+ return;
}
@@ -123,7 +123,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct inode_defrag *defrag;
- int ret = 0;
u64 transid;
if (!btrfs_test_opt(root, AUTO_DEFRAG))
@@ -150,9 +149,9 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->defrag_inodes_lock);
if (!BTRFS_I(inode)->in_defrag)
- ret = __btrfs_add_inode_defrag(inode, defrag);
+ __btrfs_add_inode_defrag(inode, defrag);
spin_unlock(&root->fs_info->defrag_inodes_lock);
- return ret;
+ return 0;
}
/*
@@ -855,7 +854,8 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
btrfs_drop_extent_cache(inode, start, end - 1, 0);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
again:
recow = 0;
split = start;
@@ -1059,7 +1059,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos)
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
struct page **pages, size_t num_pages,
loff_t pos, unsigned long first_index,
- unsigned long last_index, size_t write_bytes)
+ size_t write_bytes)
{
struct extent_state *cached_state = NULL;
int i;
@@ -1159,7 +1159,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
unsigned long first_index;
- unsigned long last_index;
size_t num_written = 0;
int nrptrs;
int ret = 0;
@@ -1172,7 +1171,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
return -ENOMEM;
first_index = pos >> PAGE_CACHE_SHIFT;
- last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT;
while (iov_iter_count(i) > 0) {
size_t offset = pos & (PAGE_CACHE_SIZE - 1);
@@ -1206,8 +1204,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
* contents of pages from loop to loop
*/
ret = prepare_pages(root, file, pages, num_pages,
- pos, first_index, last_index,
- write_bytes);
+ pos, first_index, write_bytes);
if (ret) {
btrfs_delalloc_release_space(inode,
num_pages << PAGE_CACHE_SHIFT);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 13e6255182e..15fceefbca0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1061,7 +1061,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
nolock = btrfs_is_free_space_inode(root, inode);
@@ -1282,17 +1283,16 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
return ret;
}
-static int btrfs_split_extent_hook(struct inode *inode,
- struct extent_state *orig, u64 split)
+static void btrfs_split_extent_hook(struct inode *inode,
+ struct extent_state *orig, u64 split)
{
/* not delalloc, ignore it */
if (!(orig->state & EXTENT_DELALLOC))
- return 0;
+ return;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock);
- return 0;
}
/*
@@ -1301,18 +1301,17 @@ static int btrfs_split_extent_hook(struct inode *inode,
* extents, such as when we are doing sequential writes, so we can properly
* account for the metadata space we'll need.
*/
-static int btrfs_merge_extent_hook(struct inode *inode,
- struct extent_state *new,
- struct extent_state *other)
+static void btrfs_merge_extent_hook(struct inode *inode,
+ struct extent_state *new,
+ struct extent_state *other)
{
/* not delalloc, ignore it */
if (!(other->state & EXTENT_DELALLOC))
- return 0;
+ return;
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents--;
spin_unlock(&BTRFS_I(inode)->lock);
- return 0;
}
/*
@@ -1320,8 +1319,8 @@ static int btrfs_merge_extent_hook(struct inode *inode,
* bytes in this file, and to maintain the list of inodes that
* have pending delalloc work to be done.
*/
-static int btrfs_set_bit_hook(struct inode *inode,
- struct extent_state *state, int *bits)
+static void btrfs_set_bit_hook(struct inode *inode,
+ struct extent_state *state, int *bits)
{
/*
@@ -1351,14 +1350,13 @@ static int btrfs_set_bit_hook(struct inode *inode,
}
spin_unlock(&root->fs_info->delalloc_lock);
}
- return 0;
}
/*
* extent_io.c clear_bit_hook, see set_bit_hook for why
*/
-static int btrfs_clear_bit_hook(struct inode *inode,
- struct extent_state *state, int *bits)
+static void btrfs_clear_bit_hook(struct inode *inode,
+ struct extent_state *state, int *bits)
{
/*
* set_bit and clear bit hooks normally require _irqsave/restore
@@ -1395,7 +1393,6 @@ static int btrfs_clear_bit_hook(struct inode *inode,
}
spin_unlock(&root->fs_info->delalloc_lock);
}
- return 0;
}
/*
@@ -1645,7 +1642,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
int ret;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
path->leave_spinning = 1;
@@ -2215,7 +2213,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
if (!root->orphan_block_rsv) {
block_rsv = btrfs_alloc_block_rsv(root);
- BUG_ON(!block_rsv);
+ if (!block_rsv)
+ return -ENOMEM;
}
spin_lock(&root->orphan_lock);
@@ -2517,7 +2516,9 @@ static void btrfs_read_locked_inode(struct inode *inode)
filled = true;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ goto make_bad;
+
path->leave_spinning = 1;
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
@@ -2998,13 +2999,16 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
dentry->d_name.name, dentry->d_name.len);
- BUG_ON(ret);
+ if (ret)
+ goto out;
if (inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, inode);
- BUG_ON(ret);
+ if (ret)
+ goto out;
}
+out:
nr = trans->blocks_used;
__unlink_end_trans(trans, root);
btrfs_btree_balance_dirty(root, nr);
@@ -3147,6 +3151,11 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->reada = -1;
+
if (root->ref_cows || root == root->fs_info->tree_root)
btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
@@ -3159,10 +3168,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
if (min_type == 0 && root == BTRFS_I(inode)->root)
btrfs_kill_delayed_inode_items(inode);
- path = btrfs_alloc_path();
- BUG_ON(!path);
- path->reada = -1;
-
key.objectid = ino;
key.offset = (u64)-1;
key.type = (u8)-1;
@@ -3690,7 +3695,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
int ret = 0;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
namelen, 0);
@@ -3946,6 +3952,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *new)
{
struct inode *inode;
+ int bad_inode = 0;
inode = btrfs_iget_locked(s, location->objectid, root);
if (!inode)
@@ -3955,10 +3962,19 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
btrfs_read_locked_inode(inode);
- inode_tree_add(inode);
- unlock_new_inode(inode);
- if (new)
- *new = 1;
+ if (!is_bad_inode(inode)) {
+ inode_tree_add(inode);
+ unlock_new_inode(inode);
+ if (new)
+ *new = 1;
+ } else {
+ bad_inode = 1;
+ }
+ }
+
+ if (bad_inode) {
+ iput(inode);
+ inode = ERR_PTR(-ESTALE);
}
return inode;
@@ -3993,12 +4009,19 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
struct btrfs_root *sub_root = root;
struct btrfs_key location;
int index;
- int ret;
+ int ret = 0;
if (dentry->d_name.len > BTRFS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
- ret = btrfs_inode_by_name(dir, dentry, &location);
+ if (unlikely(d_need_lookup(dentry))) {
+ memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
+ kfree(dentry->d_fsdata);
+ dentry->d_fsdata = NULL;
+ d_clear_need_lookup(dentry);
+ } else {
+ ret = btrfs_inode_by_name(dir, dentry, &location);
+ }
if (ret < 0)
return ERR_PTR(ret);
@@ -4053,6 +4076,12 @@ static int btrfs_dentry_delete(const struct dentry *dentry)
return 0;
}
+static void btrfs_dentry_release(struct dentry *dentry)
+{
+ if (dentry->d_fsdata)
+ kfree(dentry->d_fsdata);
+}
+
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
@@ -4075,6 +4104,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
struct btrfs_path *path;
struct list_head ins_list;
struct list_head del_list;
+ struct qstr q;
int ret;
struct extent_buffer *leaf;
int slot;
@@ -4164,6 +4194,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
while (di_cur < di_total) {
struct btrfs_key location;
+ struct dentry *tmp;
if (verify_dir_item(root, leaf, di))
break;
@@ -4184,6 +4215,33 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
btrfs_dir_item_key_to_cpu(leaf, di, &location);
+ q.name = name_ptr;
+ q.len = name_len;
+ q.hash = full_name_hash(q.name, q.len);
+ tmp = d_lookup(filp->f_dentry, &q);
+ if (!tmp) {
+ struct btrfs_key *newkey;
+
+ newkey = kzalloc(sizeof(struct btrfs_key),
+ GFP_NOFS);
+ if (!newkey)
+ goto no_dentry;
+ tmp = d_alloc(filp->f_dentry, &q);
+ if (!tmp) {
+ kfree(newkey);
+ dput(tmp);
+ goto no_dentry;
+ }
+ memcpy(newkey, &location,
+ sizeof(struct btrfs_key));
+ tmp->d_fsdata = newkey;
+ tmp->d_flags |= DCACHE_NEED_LOOKUP;
+ d_rehash(tmp);
+ dput(tmp);
+ } else {
+ dput(tmp);
+ }
+no_dentry:
/* is this a reference to our own snapshot? If so
* skip it
*/
@@ -4409,7 +4467,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
int owner;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return ERR_PTR(-ENOMEM);
inode = new_inode(root->fs_info->sb);
if (!inode) {
@@ -6669,19 +6728,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
return 0;
}
-/* helper function for file defrag and space balancing. This
- * forces readahead on a given range of bytes in an inode
- */
-unsigned long btrfs_force_ra(struct address_space *mapping,
- struct file_ra_state *ra, struct file *file,
- pgoff_t offset, pgoff_t last_index)
-{
- pgoff_t req_size = last_index - offset + 1;
-
- page_cache_sync_readahead(mapping, ra, file, offset, req_size);
- return offset + req_size;
-}
-
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
struct btrfs_inode *ei;
@@ -7164,7 +7210,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_unlock;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path) {
+ err = -ENOMEM;
+ drop_inode = 1;
+ goto out_unlock;
+ }
key.objectid = btrfs_ino(inode);
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
@@ -7430,4 +7480,5 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
const struct dentry_operations btrfs_dentry_operations = {
.d_delete = btrfs_dentry_delete,
+ .d_release = btrfs_dentry_release,
};
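
btrfs_iget() above now drops a half-read inode and returns ERR_PTR(-ESTALE) instead of exposing it, and btrfs_new_inode() returns ERR_PTR(-ENOMEM) when its path allocation fails. A compact userspace rendition of the ERR_PTR()/IS_ERR() convention those callers rely on (iget_like() is invented, and the macros are reimplemented here purely for illustration):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct inode_like { int in_use; };

static struct inode_like *iget_like(int simulate_bad_inode)
{
        struct inode_like *inode = calloc(1, sizeof(*inode));

        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (simulate_bad_inode) {
                free(inode);                    /* drop the half-built object */
                return ERR_PTR(-ESTALE);        /* and tell the caller why */
        }
        return inode;
}

int main(void)
{
        struct inode_like *inode = iget_like(1);

        if (IS_ERR(inode)) {
                printf("iget failed: %ld\n", PTR_ERR(inode));
                return 1;
        }
        free(inode);
        return 0;
}
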
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0b980afc5ed..7cf01334994 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1749,11 +1749,10 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
key.objectid = key.offset;
key.offset = (u64)-1;
dirid = key.objectid;
-
}
if (ptr < name)
goto out;
- memcpy(name, ptr, total_len);
+ memmove(name, ptr, total_len);
name[total_len]='\0';
ret = 0;
out:
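
The ioctl.c change above replaces memcpy() with memmove() because btrfs_search_path_in_tree() builds the path right-to-left in the same buffer it finally copies to the front, so source and destination overlap - undefined behaviour for memcpy(), well defined for memmove(). A small stand-alone demonstration (the dots mark the unused slack at the front of the buffer):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* the path was assembled right-to-left, leaving slack at the front */
        char name[32] = ".......snapshot/dir";
        char *ptr = name + 7;                   /* first real character */
        size_t total_len = strlen(ptr);

        memmove(name, ptr, total_len);          /* overlapping copy: safe */
        name[total_len] = '\0';
        printf("%s\n", name);                   /* prints "snapshot/dir" */
        return 0;
}
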
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
deleted file mode 100644
index 82d569cb626..00000000000
--- a/fs/btrfs/ref-cache.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2008 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/sort.h>
-#include "ctree.h"
-#include "ref-cache.h"
-#include "transaction.h"
-
-static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
- struct rb_node *node)
-{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct btrfs_leaf_ref *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
-
- if (bytenr < entry->bytenr)
- p = &(*p)->rb_left;
- else if (bytenr > entry->bytenr)
- p = &(*p)->rb_right;
- else
- return parent;
- }
-
- entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
- rb_link_node(node, parent, p);
- rb_insert_color(node, root);
- return NULL;
-}
-
-static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
-{
- struct rb_node *n = root->rb_node;
- struct btrfs_leaf_ref *entry;
-
- while (n) {
- entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
- WARN_ON(!entry->in_tree);
-
- if (bytenr < entry->bytenr)
- n = n->rb_left;
- else if (bytenr > entry->bytenr)
- n = n->rb_right;
- else
- return n;
- }
- return NULL;
-}
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h
deleted file mode 100644
index 24f7001f638..00000000000
--- a/fs/btrfs/ref-cache.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2008 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-#ifndef __REFCACHE__
-#define __REFCACHE__
-
-struct btrfs_extent_info {
- /* bytenr and num_bytes find the extent in the extent allocation tree */
- u64 bytenr;
- u64 num_bytes;
-
- /* objectid and offset find the back reference for the file */
- u64 objectid;
- u64 offset;
-};
-
-struct btrfs_leaf_ref {
- struct rb_node rb_node;
- struct btrfs_leaf_ref_tree *tree;
- int in_tree;
- atomic_t usage;
-
- u64 root_gen;
- u64 bytenr;
- u64 owner;
- u64 generation;
- int nritems;
-
- struct list_head list;
- struct btrfs_extent_info extents[];
-};
-
-static inline size_t btrfs_leaf_ref_size(int nr_extents)
-{
- return sizeof(struct btrfs_leaf_ref) +
- sizeof(struct btrfs_extent_info) * nr_extents;
-}
-#endif
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index ebe45443de0..f4099904565 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -71,13 +71,12 @@ out:
return ret;
}
-int btrfs_set_root_node(struct btrfs_root_item *item,
- struct extent_buffer *node)
+void btrfs_set_root_node(struct btrfs_root_item *item,
+ struct extent_buffer *node)
{
btrfs_set_root_bytenr(item, node->start);
btrfs_set_root_level(item, btrfs_header_level(node));
btrfs_set_root_generation(item, btrfs_header_generation(node));
- return 0;
}
/*
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index eb55863bb4a..7dc36fab4af 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -216,17 +216,11 @@ static void wait_current_trans(struct btrfs_root *root)
spin_lock(&root->fs_info->trans_lock);
cur_trans = root->fs_info->running_transaction;
if (cur_trans && cur_trans->blocked) {
- DEFINE_WAIT(wait);
atomic_inc(&cur_trans->use_count);
spin_unlock(&root->fs_info->trans_lock);
- while (1) {
- prepare_to_wait(&root->fs_info->transaction_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (!cur_trans->blocked)
- break;
- schedule();
- }
- finish_wait(&root->fs_info->transaction_wait, &wait);
+
+ wait_event(root->fs_info->transaction_wait,
+ !cur_trans->blocked);
put_transaction(cur_trans);
} else {
spin_unlock(&root->fs_info->trans_lock);
@@ -357,19 +351,10 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root
}
/* wait for a transaction commit to be fully complete */
-static noinline int wait_for_commit(struct btrfs_root *root,
+static noinline void wait_for_commit(struct btrfs_root *root,
struct btrfs_transaction *commit)
{
- DEFINE_WAIT(wait);
- while (!commit->commit_done) {
- prepare_to_wait(&commit->commit_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (commit->commit_done)
- break;
- schedule();
- }
- finish_wait(&commit->commit_wait, &wait);
- return 0;
+ wait_event(commit->commit_wait, commit->commit_done);
}
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
@@ -1085,22 +1070,7 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info)
static void wait_current_trans_commit_start(struct btrfs_root *root,
struct btrfs_transaction *trans)
{
- DEFINE_WAIT(wait);
-
- if (trans->in_commit)
- return;
-
- while (1) {
- prepare_to_wait(&root->fs_info->transaction_blocked_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (trans->in_commit) {
- finish_wait(&root->fs_info->transaction_blocked_wait,
- &wait);
- break;
- }
- schedule();
- finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
- }
+ wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}
/*
@@ -1110,24 +1080,8 @@ static void wait_current_trans_commit_start(struct btrfs_root *root,
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
struct btrfs_transaction *trans)
{
- DEFINE_WAIT(wait);
-
- if (trans->commit_done || (trans->in_commit && !trans->blocked))
- return;
-
- while (1) {
- prepare_to_wait(&root->fs_info->transaction_wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (trans->commit_done ||
- (trans->in_commit && !trans->blocked)) {
- finish_wait(&root->fs_info->transaction_wait,
- &wait);
- break;
- }
- schedule();
- finish_wait(&root->fs_info->transaction_wait,
- &wait);
- }
+ wait_event(root->fs_info->transaction_wait,
+ trans->commit_done || (trans->in_commit && !trans->blocked));
}
/*
@@ -1234,8 +1188,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
atomic_inc(&cur_trans->use_count);
btrfs_end_transaction(trans, root);
- ret = wait_for_commit(root, cur_trans);
- BUG_ON(ret);
+ wait_for_commit(root, cur_trans);
put_transaction(cur_trans);
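
The transaction.c hunks above fold three open-coded prepare_to_wait()/schedule()/finish_wait() loops into wait_event(wq, condition), which simply sleeps until the predicate holds. A userspace analogue using a pthread condition variable in place of the kernel wait queue (kept single-threaded only so the sketch terminates; build with -pthread):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool commit_done;

static void wait_for_commit_done(void)
{
        pthread_mutex_lock(&lock);
        while (!commit_done)                    /* wait_event(wq, commit_done); */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
}

static void mark_commit_done(void)
{
        pthread_mutex_lock(&lock);
        commit_done = true;                     /* wake_up(&commit_wait); */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        mark_commit_done();             /* complete first so the wait returns */
        wait_for_commit_done();
        return 0;
}

The while loop around pthread_cond_wait() plays the same role as wait_event() re-checking its condition after every wakeup.
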
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index ac278dd8317..babee65f8ed 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1617,7 +1617,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
return 0;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
nritems = btrfs_header_nritems(eb);
for (i = 0; i < nritems; i++) {
@@ -1723,7 +1724,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
return -ENOMEM;
if (*level == 1) {
- wc->process_func(root, next, wc, ptr_gen);
+ ret = wc->process_func(root, next, wc, ptr_gen);
+ if (ret)
+ return ret;
path->slots[*level]++;
if (wc->free) {
@@ -1788,8 +1791,11 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
parent = path->nodes[*level + 1];
root_owner = btrfs_header_owner(parent);
- wc->process_func(root, path->nodes[*level], wc,
+ ret = wc->process_func(root, path->nodes[*level], wc,
btrfs_header_generation(path->nodes[*level]));
+ if (ret)
+ return ret;
+
if (wc->free) {
struct extent_buffer *next;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b89e372c754..53875ae73ad 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1037,7 +1037,8 @@ static noinline int find_next_chunk(struct btrfs_root *root,
struct btrfs_key found_key;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
key.objectid = objectid;
key.offset = (u64)-1;
@@ -2061,8 +2062,10 @@ int btrfs_balance(struct btrfs_root *dev_root)
/* step two, relocate all the chunks */
path = btrfs_alloc_path();
- BUG_ON(!path);
-
+ if (!path) {
+ ret = -ENOMEM;
+ goto error;
+ }
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
@@ -2661,7 +2664,8 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
ret = find_next_chunk(fs_info->chunk_root,
BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
- BUG_ON(ret);
+ if (ret)
+ return ret;
alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
(fs_info->metadata_alloc_profile &
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 8d8f28c94c0..6873bb634a9 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -141,10 +141,11 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
if (rc < 0) {
- cERROR(1, "%s: Failed to resolve server part of %s to IP: %d",
- __func__, *devname, rc);
+ cFYI(1, "%s: Failed to resolve server part of %s to IP: %d",
+ __func__, *devname, rc);
goto compose_mount_options_err;
}
+
/* md_len = strlen(...) + 12 for 'sep+prefixpath='
* assuming that we have 'unc=' and 'ip=' in
* the original sb_mountdata
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 259991bd211..e76bfeb6826 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -87,9 +87,15 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
- if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
+ if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
+ server->tcpStatus == CifsNeedNegotiate)
return rc;
+ if (!server->session_estab) {
+ strncpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
+ return rc;
+ }
+
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(server->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
@@ -178,9 +184,15 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
- if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
+ if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
+ server->tcpStatus == CifsNeedNegotiate)
return rc;
+ if (!server->session_estab) {
+ strncpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
+ return rc;
+ }
+
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(server->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 86551747096..f93eb948d07 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -86,24 +86,6 @@ extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
-void
-cifs_sb_active(struct super_block *sb)
-{
- struct cifs_sb_info *server = CIFS_SB(sb);
-
- if (atomic_inc_return(&server->active) == 1)
- atomic_inc(&sb->s_active);
-}
-
-void
-cifs_sb_deactive(struct super_block *sb)
-{
- struct cifs_sb_info *server = CIFS_SB(sb);
-
- if (atomic_dec_and_test(&server->active))
- deactivate_super(sb);
-}
-
static int
cifs_read_super(struct super_block *sb)
{
@@ -581,6 +563,10 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
mutex_unlock(&dir->i_mutex);
dput(dentry);
dentry = child;
+ if (!dentry->d_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOENT);
+ }
} while (!IS_ERR(dentry));
_FreeXid(xid);
kfree(full_path);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index fbd050c8d52..cb71dc1f94d 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -41,10 +41,6 @@ extern struct file_system_type cifs_fs_type;
extern const struct address_space_operations cifs_addr_ops;
extern const struct address_space_operations cifs_addr_ops_smallbuf;
-/* Functions related to super block operations */
-extern void cifs_sb_active(struct super_block *sb);
-extern void cifs_sb_deactive(struct super_block *sb);
-
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
extern struct inode *cifs_root_iget(struct super_block *);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1fcf4e5b311..38ce6d44b14 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -942,8 +942,6 @@ GLOBAL_EXTERN spinlock_t siduidlock;
GLOBAL_EXTERN spinlock_t sidgidlock;
void cifs_oplock_break(struct work_struct *work);
-void cifs_oplock_break_get(struct cifsFileInfo *cfile);
-void cifs_oplock_break_put(struct cifsFileInfo *cfile);
extern const struct slow_work_ops cifs_oplock_break_ops;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 1a9fe7f816d..aac37d99a48 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -107,7 +107,7 @@ static void mark_open_files_invalid(struct cifs_tcon *pTcon)
static int
cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
{
- int rc = 0;
+ int rc;
struct cifs_ses *ses;
struct TCP_Server_Info *server;
struct nls_table *nls_codepage;
@@ -5720,6 +5720,7 @@ CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
char *temp_ptr;
char *end_of_smb;
__u16 params, byte_count, data_offset;
+ unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
cFYI(1, "In Query All EAs path %s", searchName);
QAllEAsRetry:
@@ -5837,7 +5838,8 @@ QAllEAsRetry:
}
if (ea_name) {
- if (strncmp(ea_name, temp_ptr, name_len) == 0) {
+ if (ea_name_len == name_len &&
+ strncmp(ea_name, temp_ptr, name_len) == 0) {
temp_ptr += name_len + 1;
rc = value_len;
if (buf_size == 0)
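
The CIFSSMBQAllEAs() hunk above fixes a prefix-match bug: bounding strncmp() by the stored entry's name length lets an EA whose name is merely a prefix of the requested one slip through, so the patch compares the lengths first. A stand-alone check showing the difference (ea_matches() is illustrative, not cifs code):

#include <stdio.h>
#include <string.h>

static int ea_matches(const char *wanted, const char *entry, size_t entry_len)
{
        return strlen(wanted) == entry_len &&
               strncmp(wanted, entry, entry_len) == 0;
}

int main(void)
{
        /* old check: compares only the stored entry's length */
        printf("prefix entry, old check: %d\n",
               strncmp("user.foo", "user.f", 6) == 0);   /* 1: false match */
        printf("prefix entry, new check: %d\n",
               ea_matches("user.foo", "user.f", 6));     /* 0: rejected */
        printf("exact entry,  new check: %d\n",
               ea_matches("user.foo", "user.foo", 8));   /* 1: match */
        return 0;
}
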
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e66297bad41..80c2e3add3a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -319,25 +319,328 @@ requeue_echo:
queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
}
+static bool
+allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size,
+ bool is_large_buf)
+{
+ char *bbuf = *bigbuf, *sbuf = *smallbuf;
+
+ if (bbuf == NULL) {
+ bbuf = (char *)cifs_buf_get();
+ if (!bbuf) {
+ cERROR(1, "No memory for large SMB response");
+ msleep(3000);
+ /* retry will check if exiting */
+ return false;
+ }
+ } else if (is_large_buf) {
+ /* we are reusing a dirty large buf, clear its start */
+ memset(bbuf, 0, size);
+ }
+
+ if (sbuf == NULL) {
+ sbuf = (char *)cifs_small_buf_get();
+ if (!sbuf) {
+ cERROR(1, "No memory for SMB response");
+ msleep(1000);
+ /* retry will check if exiting */
+ return false;
+ }
+ /* beginning of smb buffer is cleared in our buf_get */
+ } else {
+ /* if existing small buf clear beginning */
+ memset(sbuf, 0, size);
+ }
+
+ *bigbuf = bbuf;
+ *smallbuf = sbuf;
+
+ return true;
+}
+
+static int
+read_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg,
+ struct kvec *iov, unsigned int to_read,
+ unsigned int *ptotal_read, bool is_header_read)
+{
+ int length, rc = 0;
+ unsigned int total_read;
+ char *buf = iov->iov_base;
+
+ for (total_read = 0; total_read < to_read; total_read += length) {
+ length = kernel_recvmsg(server->ssocket, smb_msg, iov, 1,
+ to_read - total_read, 0);
+ if (server->tcpStatus == CifsExiting) {
+ /* then will exit */
+ rc = 2;
+ break;
+ } else if (server->tcpStatus == CifsNeedReconnect) {
+ cifs_reconnect(server);
+ /* Reconnect wakes up rspns q */
+ /* Now we will reread sock */
+ rc = 1;
+ break;
+ } else if (length == -ERESTARTSYS ||
+ length == -EAGAIN ||
+ length == -EINTR) {
+ /*
+ * Minimum sleep to prevent looping, allowing the socket
+ * to clear and app threads to set tcpStatus
+ * CifsNeedReconnect if the server has hung.
+ */
+ usleep_range(1000, 2000);
+ length = 0;
+ if (!is_header_read)
+ continue;
+ /* Special handling for header read */
+ if (total_read) {
+ iov->iov_base = (to_read - total_read) +
+ buf;
+ iov->iov_len = to_read - total_read;
+ smb_msg->msg_control = NULL;
+ smb_msg->msg_controllen = 0;
+ rc = 3;
+ } else
+ rc = 1;
+ break;
+ } else if (length <= 0) {
+ cERROR(1, "Received no data, expecting %d",
+ to_read - total_read);
+ cifs_reconnect(server);
+ rc = 1;
+ break;
+ }
+ }
+
+ *ptotal_read = total_read;
+ return rc;
+}
+
+static bool
+check_rfc1002_header(struct TCP_Server_Info *server, char *buf)
+{
+ char temp = *buf;
+ unsigned int pdu_length = be32_to_cpu(
+ ((struct smb_hdr *)buf)->smb_buf_length);
+
+ /*
+ * The first byte of the big-endian length field is
+ * actually not part of the length but the type, with
+ * the most common value, zero, indicating regular data.
+ */
+ if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
+ return false;
+ } else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
+ cFYI(1, "Good RFC 1002 session rsp");
+ return false;
+ } else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
+ /*
+ * We get this from Windows 98 instead of an error on
+ * SMB negprot response.
+ */
+ cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
+ pdu_length);
+ /* give server a second to clean up */
+ msleep(1000);
+ /*
+ * Always try 445 first on reconnect since we get NACK
+ * on some if we ever connected to port 139 (the NACK
+ * is since we do not begin with RFC1001 session
+ * initialize frame).
+ */
+ cifs_set_port((struct sockaddr *)
+ &server->dstaddr, CIFS_PORT);
+ cifs_reconnect(server);
+ wake_up(&server->response_q);
+ return false;
+ } else if (temp != (char) 0) {
+ cERROR(1, "Unknown RFC 1002 frame");
+ cifs_dump_mem(" Received Data: ", buf, 4);
+ cifs_reconnect(server);
+ return false;
+ }
+
+ /* else we have an SMB response */
+ if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
+ (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
+ cERROR(1, "Invalid size SMB length %d pdu_length %d",
+ 4, pdu_length+4);
+ cifs_reconnect(server);
+ wake_up(&server->response_q);
+ return false;
+ }
+
+ return true;
+}
+
+static struct mid_q_entry *
+find_cifs_mid(struct TCP_Server_Info *server, struct smb_hdr *buf,
+ int *length, bool is_large_buf, bool *is_multi_rsp, char **bigbuf)
+{
+ struct mid_q_entry *mid = NULL, *tmp_mid, *ret = NULL;
+
+ spin_lock(&GlobalMid_Lock);
+ list_for_each_entry_safe(mid, tmp_mid, &server->pending_mid_q, qhead) {
+ if (mid->mid != buf->Mid ||
+ mid->midState != MID_REQUEST_SUBMITTED ||
+ mid->command != buf->Command)
+ continue;
+
+ if (*length == 0 && check2ndT2(buf, server->maxBuf) > 0) {
+ /* We have a multipart transact2 resp */
+ *is_multi_rsp = true;
+ if (mid->resp_buf) {
+ /* merge response - fix up 1st*/
+ *length = coalesce_t2(buf, mid->resp_buf);
+ if (*length > 0) {
+ *length = 0;
+ mid->multiRsp = true;
+ break;
+ }
+ /* All parts received or packet is malformed. */
+ mid->multiEnd = true;
+ goto multi_t2_fnd;
+ }
+ if (!is_large_buf) {
+ /*FIXME: switch to already allocated largebuf?*/
+ cERROR(1, "1st trans2 resp needs bigbuf");
+ } else {
+ /* Have first buffer */
+ mid->resp_buf = buf;
+ mid->largeBuf = true;
+ *bigbuf = NULL;
+ }
+ break;
+ }
+ mid->resp_buf = buf;
+ mid->largeBuf = is_large_buf;
+multi_t2_fnd:
+ if (*length == 0)
+ mid->midState = MID_RESPONSE_RECEIVED;
+ else
+ mid->midState = MID_RESPONSE_MALFORMED;
+#ifdef CONFIG_CIFS_STATS2
+ mid->when_received = jiffies;
+#endif
+ list_del_init(&mid->qhead);
+ ret = mid;
+ break;
+ }
+ spin_unlock(&GlobalMid_Lock);
+
+ return ret;
+}
+
+static void clean_demultiplex_info(struct TCP_Server_Info *server)
+{
+ int length;
+
+ /* take it off the list, if it's not already */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_del_init(&server->tcp_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ spin_lock(&GlobalMid_Lock);
+ server->tcpStatus = CifsExiting;
+ spin_unlock(&GlobalMid_Lock);
+ wake_up_all(&server->response_q);
+
+ /*
+ * Check if we have blocked requests that need to be freed up. Note that
+ * cifs_max_pending is normally 50, but can be set at module install
+ * time to as little as two.
+ */
+ spin_lock(&GlobalMid_Lock);
+ if (atomic_read(&server->inFlight) >= cifs_max_pending)
+ atomic_set(&server->inFlight, cifs_max_pending - 1);
+ /*
+ * We do not want to set the max_pending too low or we could end up
+ * with the counter going negative.
+ */
+ spin_unlock(&GlobalMid_Lock);
+ /*
+ * Although there should not be any requests blocked on this queue it
+ * can not hurt to be paranoid and try to wake up requests that may
+ * haven been blocked when more than 50 at time were on the wire to the
+ * same server - they now will see the session is in exit state and get
+ * out of SendReceive.
+ */
+ wake_up_all(&server->request_q);
+ /* give those requests time to exit */
+ msleep(125);
+
+ if (server->ssocket) {
+ sock_release(server->ssocket);
+ server->ssocket = NULL;
+ }
+
+ if (!list_empty(&server->pending_mid_q)) {
+ struct list_head dispose_list;
+ struct mid_q_entry *mid_entry;
+ struct list_head *tmp, *tmp2;
+
+ INIT_LIST_HEAD(&dispose_list);
+ spin_lock(&GlobalMid_Lock);
+ list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
+ mid_entry->midState = MID_SHUTDOWN;
+ list_move(&mid_entry->qhead, &dispose_list);
+ }
+ spin_unlock(&GlobalMid_Lock);
+
+ /* now walk dispose list and issue callbacks */
+ list_for_each_safe(tmp, tmp2, &dispose_list) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ cFYI(1, "Callback mid 0x%x", mid_entry->mid);
+ list_del_init(&mid_entry->qhead);
+ mid_entry->callback(mid_entry);
+ }
+ /* 1/8th of sec is more than enough time for them to exit */
+ msleep(125);
+ }
+
+ if (!list_empty(&server->pending_mid_q)) {
+ /*
+		 * mpx threads have not exited yet; give them at least the SMB
+		 * send timeout for long operations.
+ *
+ * Due to delays on oplock break requests, we need to wait at
+ * least 45 seconds before giving up on a request getting a
+ * response and going ahead and killing cifsd.
+ */
+ cFYI(1, "Wait for exit from demultiplex thread");
+ msleep(46000);
+ /*
+		 * If threads still have not exited, they are probably never coming
+		 * home; not much else we can do but free the memory.
+ */
+ }
+
+ kfree(server->hostname);
+ kfree(server);
+
+ length = atomic_dec_return(&tcpSesAllocCount);
+ if (length > 0)
+ mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
+ GFP_KERNEL);
+}
+
static int
cifs_demultiplex_thread(void *p)
{
int length;
struct TCP_Server_Info *server = p;
unsigned int pdu_length, total_read;
+ char *buf = NULL, *bigbuf = NULL, *smallbuf = NULL;
struct smb_hdr *smb_buffer = NULL;
- struct smb_hdr *bigbuf = NULL;
- struct smb_hdr *smallbuf = NULL;
struct msghdr smb_msg;
struct kvec iov;
- struct socket *csocket = server->ssocket;
- struct list_head *tmp, *tmp2;
struct task_struct *task_to_wake = NULL;
struct mid_q_entry *mid_entry;
- char temp;
bool isLargeBuf = false;
- bool isMultiRsp;
- int reconnect;
+ bool isMultiRsp = false;
+ int rc;
current->flags |= PF_MEMALLOC;
cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));
@@ -351,35 +654,16 @@ cifs_demultiplex_thread(void *p)
while (server->tcpStatus != CifsExiting) {
if (try_to_freeze())
continue;
- if (bigbuf == NULL) {
- bigbuf = cifs_buf_get();
- if (!bigbuf) {
- cERROR(1, "No memory for large SMB response");
- msleep(3000);
- /* retry will check if exiting */
- continue;
- }
- } else if (isLargeBuf) {
- /* we are reusing a dirty large buf, clear its start */
- memset(bigbuf, 0, sizeof(struct smb_hdr));
- }
- if (smallbuf == NULL) {
- smallbuf = cifs_small_buf_get();
- if (!smallbuf) {
- cERROR(1, "No memory for SMB response");
- msleep(1000);
- /* retry will check if exiting */
- continue;
- }
- /* beginning of smb buffer is cleared in our buf_get */
- } else /* if existing small buf clear beginning */
- memset(smallbuf, 0, sizeof(struct smb_hdr));
+ if (!allocate_buffers(&bigbuf, &smallbuf,
+ sizeof(struct smb_hdr), isLargeBuf))
+ continue;
isLargeBuf = false;
isMultiRsp = false;
- smb_buffer = smallbuf;
- iov.iov_base = smb_buffer;
+ smb_buffer = (struct smb_hdr *)smallbuf;
+ buf = smallbuf;
+ iov.iov_base = buf;
iov.iov_len = 4;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
@@ -393,158 +677,50 @@ incomplete_rcv:
"Reconnecting...", server->hostname,
(echo_retries * SMB_ECHO_INTERVAL / HZ));
cifs_reconnect(server);
- csocket = server->ssocket;
wake_up(&server->response_q);
continue;
}
- length =
- kernel_recvmsg(csocket, &smb_msg,
- &iov, 1, pdu_length, 0 /* BB other flags? */);
-
- if (server->tcpStatus == CifsExiting) {
+ rc = read_from_socket(server, &smb_msg, &iov, pdu_length,
+ &total_read, true /* header read */);
+ if (rc == 3)
+ goto incomplete_rcv;
+ else if (rc == 2)
break;
- } else if (server->tcpStatus == CifsNeedReconnect) {
- cFYI(1, "Reconnect after server stopped responding");
- cifs_reconnect(server);
- cFYI(1, "call to reconnect done");
- csocket = server->ssocket;
- continue;
- } else if (length == -ERESTARTSYS ||
- length == -EAGAIN ||
- length == -EINTR) {
- msleep(1); /* minimum sleep to prevent looping
- allowing socket to clear and app threads to set
- tcpStatus CifsNeedReconnect if server hung */
- if (pdu_length < 4) {
- iov.iov_base = (4 - pdu_length) +
- (char *)smb_buffer;
- iov.iov_len = pdu_length;
- smb_msg.msg_control = NULL;
- smb_msg.msg_controllen = 0;
- goto incomplete_rcv;
- } else
- continue;
- } else if (length <= 0) {
- cFYI(1, "Reconnect after unexpected peek error %d",
- length);
- cifs_reconnect(server);
- csocket = server->ssocket;
- wake_up(&server->response_q);
+ else if (rc == 1)
continue;
- } else if (length < pdu_length) {
- cFYI(1, "requested %d bytes but only got %d bytes",
- pdu_length, length);
- pdu_length -= length;
- msleep(1);
- goto incomplete_rcv;
- }
-
- /* The right amount was read from socket - 4 bytes */
- /* so we can now interpret the length field */
- /* the first byte big endian of the length field,
- is actually not part of the length but the type
- with the most common, zero, as regular data */
- temp = *((char *) smb_buffer);
+ /*
+ * The right amount was read from socket - 4 bytes,
+ * so we can now interpret the length field.
+ */
- /* Note that FC 1001 length is big endian on the wire,
- but we convert it here so it is always manipulated
- as host byte order */
+ /*
+ * Note that RFC 1001 length is big endian on the wire,
+ * but we convert it here so it is always manipulated
+ * as host byte order.
+ */
pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
cFYI(1, "rfc1002 length 0x%x", pdu_length+4);
-
- if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
- continue;
- } else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
- cFYI(1, "Good RFC 1002 session rsp");
- continue;
- } else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
- /* we get this from Windows 98 instead of
- an error on SMB negprot response */
- cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
- pdu_length);
- /* give server a second to clean up */
- msleep(1000);
- /* always try 445 first on reconnect since we get NACK
- * on some if we ever connected to port 139 (the NACK
- * is since we do not begin with RFC1001 session
- * initialize frame)
- */
- cifs_set_port((struct sockaddr *)
- &server->dstaddr, CIFS_PORT);
- cifs_reconnect(server);
- csocket = server->ssocket;
- wake_up(&server->response_q);
- continue;
- } else if (temp != (char) 0) {
- cERROR(1, "Unknown RFC 1002 frame");
- cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
- length);
- cifs_reconnect(server);
- csocket = server->ssocket;
+ if (!check_rfc1002_header(server, buf))
continue;
- }
-
- /* else we have an SMB response */
- if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
- (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
- cERROR(1, "Invalid size SMB length %d pdu_length %d",
- length, pdu_length+4);
- cifs_reconnect(server);
- csocket = server->ssocket;
- wake_up(&server->response_q);
- continue;
- }
/* else length ok */
- reconnect = 0;
-
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
isLargeBuf = true;
memcpy(bigbuf, smallbuf, 4);
- smb_buffer = bigbuf;
+ smb_buffer = (struct smb_hdr *)bigbuf;
+ buf = bigbuf;
}
- length = 0;
- iov.iov_base = 4 + (char *)smb_buffer;
+
+ iov.iov_base = 4 + buf;
iov.iov_len = pdu_length;
- for (total_read = 0; total_read < pdu_length;
- total_read += length) {
- length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
- pdu_length - total_read, 0);
- if (server->tcpStatus == CifsExiting) {
- /* then will exit */
- reconnect = 2;
- break;
- } else if (server->tcpStatus == CifsNeedReconnect) {
- cifs_reconnect(server);
- csocket = server->ssocket;
- /* Reconnect wakes up rspns q */
- /* Now we will reread sock */
- reconnect = 1;
- break;
- } else if (length == -ERESTARTSYS ||
- length == -EAGAIN ||
- length == -EINTR) {
- msleep(1); /* minimum sleep to prevent looping,
- allowing socket to clear and app
- threads to set tcpStatus
- CifsNeedReconnect if server hung*/
- length = 0;
- continue;
- } else if (length <= 0) {
- cERROR(1, "Received no data, expecting %d",
- pdu_length - total_read);
- cifs_reconnect(server);
- csocket = server->ssocket;
- reconnect = 1;
- break;
- }
- }
- if (reconnect == 2)
+ rc = read_from_socket(server, &smb_msg, &iov, pdu_length,
+ &total_read, false);
+ if (rc == 2)
break;
- else if (reconnect == 1)
+ else if (rc == 1)
continue;
total_read += 4; /* account for rfc1002 hdr */
@@ -562,75 +738,13 @@ incomplete_rcv:
*/
length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
if (length != 0)
- cifs_dump_mem("Bad SMB: ", smb_buffer,
- min_t(unsigned int, total_read, 48));
+ cifs_dump_mem("Bad SMB: ", buf,
+ min_t(unsigned int, total_read, 48));
- mid_entry = NULL;
server->lstrp = jiffies;
- spin_lock(&GlobalMid_Lock);
- list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-
- if (mid_entry->mid != smb_buffer->Mid ||
- mid_entry->midState != MID_REQUEST_SUBMITTED ||
- mid_entry->command != smb_buffer->Command) {
- mid_entry = NULL;
- continue;
- }
-
- if (length == 0 &&
- check2ndT2(smb_buffer, server->maxBuf) > 0) {
- /* We have a multipart transact2 resp */
- isMultiRsp = true;
- if (mid_entry->resp_buf) {
- /* merge response - fix up 1st*/
- length = coalesce_t2(smb_buffer,
- mid_entry->resp_buf);
- if (length > 0) {
- length = 0;
- mid_entry->multiRsp = true;
- break;
- } else {
- /* all parts received or
- * packet is malformed
- */
- mid_entry->multiEnd = true;
- goto multi_t2_fnd;
- }
- } else {
- if (!isLargeBuf) {
- /*
- * FIXME: switch to already
- * allocated largebuf?
- */
- cERROR(1, "1st trans2 resp "
- "needs bigbuf");
- } else {
- /* Have first buffer */
- mid_entry->resp_buf =
- smb_buffer;
- mid_entry->largeBuf = true;
- bigbuf = NULL;
- }
- }
- break;
- }
- mid_entry->resp_buf = smb_buffer;
- mid_entry->largeBuf = isLargeBuf;
-multi_t2_fnd:
- if (length == 0)
- mid_entry->midState = MID_RESPONSE_RECEIVED;
- else
- mid_entry->midState = MID_RESPONSE_MALFORMED;
-#ifdef CONFIG_CIFS_STATS2
- mid_entry->when_received = jiffies;
-#endif
- list_del_init(&mid_entry->qhead);
- break;
- }
- spin_unlock(&GlobalMid_Lock);
-
+ mid_entry = find_cifs_mid(server, smb_buffer, &length,
+ isLargeBuf, &isMultiRsp, &bigbuf);
if (mid_entry != NULL) {
mid_entry->callback(mid_entry);
/* Was previous buf put in mpx struct for multi-rsp? */
@@ -648,7 +762,7 @@ multi_t2_fnd:
!isMultiRsp) {
cERROR(1, "No task to wake, unknown frame received! "
"NumMids %d", atomic_read(&midCount));
- cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
+ cifs_dump_mem("Received Data is: ", buf,
sizeof(struct smb_hdr));
#ifdef CONFIG_CIFS_DEBUG2
cifs_dump_detail(smb_buffer);
@@ -658,88 +772,13 @@ multi_t2_fnd:
}
} /* end while !EXITING */
- /* take it off the list, if it's not already */
- spin_lock(&cifs_tcp_ses_lock);
- list_del_init(&server->tcp_ses_list);
- spin_unlock(&cifs_tcp_ses_lock);
-
- spin_lock(&GlobalMid_Lock);
- server->tcpStatus = CifsExiting;
- spin_unlock(&GlobalMid_Lock);
- wake_up_all(&server->response_q);
-
- /* check if we have blocked requests that need to free */
- /* Note that cifs_max_pending is normally 50, but
- can be set at module install time to as little as two */
- spin_lock(&GlobalMid_Lock);
- if (atomic_read(&server->inFlight) >= cifs_max_pending)
- atomic_set(&server->inFlight, cifs_max_pending - 1);
- /* We do not want to set the max_pending too low or we
- could end up with the counter going negative */
- spin_unlock(&GlobalMid_Lock);
- /* Although there should not be any requests blocked on
- this queue it can not hurt to be paranoid and try to wake up requests
- that may haven been blocked when more than 50 at time were on the wire
- to the same server - they now will see the session is in exit state
- and get out of SendReceive. */
- wake_up_all(&server->request_q);
- /* give those requests time to exit */
- msleep(125);
-
- if (server->ssocket) {
- sock_release(csocket);
- server->ssocket = NULL;
- }
/* buffer usually freed in free_mid - need to free it here on exit */
cifs_buf_release(bigbuf);
if (smallbuf) /* no sense logging a debug message if NULL */
cifs_small_buf_release(smallbuf);
- if (!list_empty(&server->pending_mid_q)) {
- struct list_head dispose_list;
-
- INIT_LIST_HEAD(&dispose_list);
- spin_lock(&GlobalMid_Lock);
- list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
- mid_entry->midState = MID_SHUTDOWN;
- list_move(&mid_entry->qhead, &dispose_list);
- }
- spin_unlock(&GlobalMid_Lock);
-
- /* now walk dispose list and issue callbacks */
- list_for_each_safe(tmp, tmp2, &dispose_list) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cFYI(1, "Callback mid 0x%x", mid_entry->mid);
- list_del_init(&mid_entry->qhead);
- mid_entry->callback(mid_entry);
- }
- /* 1/8th of sec is more than enough time for them to exit */
- msleep(125);
- }
-
- if (!list_empty(&server->pending_mid_q)) {
- /* mpx threads have not exited yet give them
- at least the smb send timeout time for long ops */
- /* due to delays on oplock break requests, we need
- to wait at least 45 seconds before giving up
- on a request getting a response and going ahead
- and killing cifsd */
- cFYI(1, "Wait for exit from demultiplex thread");
- msleep(46000);
- /* if threads still have not exited they are probably never
- coming home not much else we can do but free the memory */
- }
-
- kfree(server->hostname);
task_to_wake = xchg(&server->tsk, NULL);
- kfree(server);
-
- length = atomic_dec_return(&tcpSesAllocCount);
- if (length > 0)
- mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
- GFP_KERNEL);
+ clean_demultiplex_info(server);
/* if server->tsk was NULL then wait for a signal before exiting */
if (!task_to_wake) {
@@ -3193,15 +3232,9 @@ mount_fail_check:
else
cifs_put_tcp_session(srvTcp);
bdi_destroy(&cifs_sb->bdi);
- goto out;
}
- /* volume_info->password is freed above when existing session found
- (in which case it is not needed anymore) but when new sesion is created
- the password ptr is put in the new session structure (in which case the
- password will be freed at unmount time) */
out:
- /* zero out password before freeing */
FreeXid(xid);
return rc;
}
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 548f06230a6..1d2d91d9bf6 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -79,8 +79,8 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
/* Perform the upcall */
rc = dns_query(NULL, hostname, len, NULL, ip_addr, NULL);
if (rc < 0)
- cERROR(1, "%s: unable to resolve: %*.*s",
- __func__, len, len, hostname);
+ cFYI(1, "%s: unable to resolve: %*.*s",
+ __func__, len, len, hostname);
else
cFYI(1, "%s: resolved: %*.*s to %s",
__func__, len, len, hostname, *ip_addr);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 378acdafa35..9f41a10523a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -314,6 +314,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
}
spin_unlock(&cifs_file_list_lock);
+ cancel_work_sync(&cifs_file->oplock_break);
+
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
int xid, rc;
@@ -2418,31 +2420,6 @@ void cifs_oplock_break(struct work_struct *work)
cinode->clientCanCacheRead ? 1 : 0);
cFYI(1, "Oplock release rc = %d", rc);
}
-
- /*
- * We might have kicked in before is_valid_oplock_break()
- * finished grabbing reference for us. Make sure it's done by
- * waiting for cifs_file_list_lock.
- */
- spin_lock(&cifs_file_list_lock);
- spin_unlock(&cifs_file_list_lock);
-
- cifs_oplock_break_put(cfile);
-}
-
-/* must be called while holding cifs_file_list_lock */
-void cifs_oplock_break_get(struct cifsFileInfo *cfile)
-{
- cifs_sb_active(cfile->dentry->d_sb);
- cifsFileInfo_get(cfile);
-}
-
-void cifs_oplock_break_put(struct cifsFileInfo *cfile)
-{
- struct super_block *sb = cfile->dentry->d_sb;
-
- cifsFileInfo_put(cfile);
- cifs_sb_deactive(sb);
}
const struct address_space_operations cifs_addr_ops = {
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9b018c8334f..a7b2dcd4a53 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -764,20 +764,10 @@ char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
if (full_path == NULL)
return full_path;
- if (dfsplen) {
+ if (dfsplen)
strncpy(full_path, tcon->treeName, dfsplen);
- /* switch slash direction in prepath depending on whether
- * windows or posix style path names
- */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
- int i;
- for (i = 0; i < dfsplen; i++) {
- if (full_path[i] == '\\')
- full_path[i] = '/';
- }
- }
- }
strncpy(full_path + dfsplen, vol->prepath, pplen);
+ convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
full_path[dfsplen + pplen] = 0; /* add trailing null */
return full_path;
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 03a1f491d39..7c169339259 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -585,15 +585,8 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
cifs_set_oplock_level(pCifsInode,
pSMB->OplockLevel ? OPLOCK_READ : 0);
- /*
- * cifs_oplock_break_put() can't be called
- * from here. Get reference after queueing
- * succeeded. cifs_oplock_break() will
- * synchronize using cifs_file_list_lock.
- */
- if (queue_work(system_nrt_wq,
- &netfile->oplock_break))
- cifs_oplock_break_get(netfile);
+ queue_work(system_nrt_wq,
+ &netfile->oplock_break);
netfile->oplock_break_cancelled = false;
spin_unlock(&cifs_file_list_lock);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 147aa22c3c3..c1b9c4b1073 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -362,6 +362,8 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
mid = AllocMidQEntry(hdr, server);
if (mid == NULL) {
mutex_unlock(&server->srv_mutex);
+ atomic_dec(&server->inFlight);
+ wake_up(&server->request_q);
return -ENOMEM;
}
diff --git a/fs/dcache.c b/fs/dcache.c
index b05aac3a8cf..a88948b8bd1 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -301,6 +301,27 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
return parent;
}
+/*
+ * Unhash a dentry without inserting an RCU walk barrier or checking that
+ * dentry->d_lock is locked. The caller must take care of that, if
+ * appropriate.
+ */
+static void __d_shrink(struct dentry *dentry)
+{
+ if (!d_unhashed(dentry)) {
+ struct hlist_bl_head *b;
+ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+ b = &dentry->d_sb->s_anon;
+ else
+ b = d_hash(dentry->d_parent, dentry->d_name.hash);
+
+ hlist_bl_lock(b);
+ __hlist_bl_del(&dentry->d_hash);
+ dentry->d_hash.pprev = NULL;
+ hlist_bl_unlock(b);
+ }
+}
+
/**
* d_drop - drop a dentry
* @dentry: dentry to drop
@@ -319,17 +340,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
void __d_drop(struct dentry *dentry)
{
if (!d_unhashed(dentry)) {
- struct hlist_bl_head *b;
- if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
- b = &dentry->d_sb->s_anon;
- else
- b = d_hash(dentry->d_parent, dentry->d_name.hash);
-
- hlist_bl_lock(b);
- __hlist_bl_del(&dentry->d_hash);
- dentry->d_hash.pprev = NULL;
- hlist_bl_unlock(b);
-
+ __d_shrink(dentry);
dentry_rcuwalk_barrier(dentry);
}
}
@@ -784,6 +795,7 @@ relock:
/**
* prune_dcache_sb - shrink the dcache
+ * @sb: superblock
* @nr_to_scan: number of entries to try to free
*
* Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
@@ -828,44 +840,24 @@ EXPORT_SYMBOL(shrink_dcache_sb);
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
struct dentry *parent;
- unsigned detached = 0;
BUG_ON(!IS_ROOT(dentry));
- /* detach this root from the system */
- spin_lock(&dentry->d_lock);
- dentry_lru_del(dentry);
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
-
for (;;) {
/* descend to the first leaf in the current subtree */
- while (!list_empty(&dentry->d_subdirs)) {
- struct dentry *loop;
-
- /* this is a branch with children - detach all of them
- * from the system in one go */
- spin_lock(&dentry->d_lock);
- list_for_each_entry(loop, &dentry->d_subdirs,
- d_u.d_child) {
- spin_lock_nested(&loop->d_lock,
- DENTRY_D_LOCK_NESTED);
- dentry_lru_del(loop);
- __d_drop(loop);
- spin_unlock(&loop->d_lock);
- }
- spin_unlock(&dentry->d_lock);
-
- /* move to the first child */
+ while (!list_empty(&dentry->d_subdirs))
dentry = list_entry(dentry->d_subdirs.next,
struct dentry, d_u.d_child);
- }
/* consume the dentries from this leaf up through its parents
* until we find one with children or run out altogether */
do {
struct inode *inode;
+ /* detach from the system */
+ dentry_lru_del(dentry);
+ __d_shrink(dentry);
+
if (dentry->d_count != 0) {
printk(KERN_ERR
"BUG: Dentry %p{i=%lx,n=%s}"
@@ -886,14 +878,10 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
list_del(&dentry->d_u.d_child);
} else {
parent = dentry->d_parent;
- spin_lock(&parent->d_lock);
parent->d_count--;
list_del(&dentry->d_u.d_child);
- spin_unlock(&parent->d_lock);
}
- detached++;
-
inode = dentry->d_inode;
if (inode) {
dentry->d_inode = NULL;
@@ -938,9 +926,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
dentry = sb->s_root;
sb->s_root = NULL;
- spin_lock(&dentry->d_lock);
dentry->d_count--;
- spin_unlock(&dentry->d_lock);
shrink_dcache_for_umount_subtree(dentry);
while (!hlist_bl_empty(&sb->s_anon)) {
@@ -1743,7 +1729,7 @@ seqretry:
*/
if (read_seqcount_retry(&dentry->d_seq, *seq))
goto seqretry;
- if (parent->d_flags & DCACHE_OP_COMPARE) {
+ if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
if (parent->d_op->d_compare(parent, *inode,
dentry, i,
tlen, tname, name))
diff --git a/fs/exofs/Kbuild b/fs/exofs/Kbuild
index 2d0f757fda3..c5a5855a6c4 100644
--- a/fs/exofs/Kbuild
+++ b/fs/exofs/Kbuild
@@ -12,5 +12,8 @@
# Kbuild - Gets included from the Kernels Makefile and build system
#
-exofs-y := ios.o inode.o file.o symlink.o namei.o dir.o super.o
+# ore module library
+obj-$(CONFIG_ORE) += ore.o
+
+exofs-y := inode.o file.o symlink.o namei.o dir.o super.o
obj-$(CONFIG_EXOFS_FS) += exofs.o
diff --git a/fs/exofs/Kconfig b/fs/exofs/Kconfig
index 86194b2f799..70bae414929 100644
--- a/fs/exofs/Kconfig
+++ b/fs/exofs/Kconfig
@@ -1,6 +1,10 @@
+config ORE
+ tristate
+
config EXOFS_FS
tristate "exofs: OSD based file system support"
depends on SCSI_OSD_ULD
+ select ORE
help
EXOFS is a file system that uses an OSD storage device,
as its backing storage.
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index c965806c282..f4e442ec744 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -36,12 +36,9 @@
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
-#include "common.h"
+#include <scsi/osd_ore.h>
-/* FIXME: Remove once pnfs hits mainline
- * #include <linux/exportfs/pnfs_osd_xdr.h>
- */
-#include "pnfs.h"
+#include "common.h"
#define EXOFS_ERR(fmt, a...) printk(KERN_ERR "exofs: " fmt, ##a)
@@ -56,27 +53,11 @@
/* u64 has problems with printk this will cast it to unsigned long long */
#define _LLU(x) (unsigned long long)(x)
-struct exofs_layout {
- osd_id s_pid; /* partition ID of file system*/
-
- /* Our way of looking at the data_map */
- unsigned stripe_unit;
- unsigned mirrors_p1;
-
- unsigned group_width;
- u64 group_depth;
- unsigned group_count;
-
- enum exofs_inode_layout_gen_functions lay_func;
-
- unsigned s_numdevs; /* Num of devices in array */
- struct osd_dev *s_ods[0]; /* Variable length */
-};
-
/*
* our extension to the in-memory superblock
*/
struct exofs_sb_info {
+ struct backing_dev_info bdi; /* register our bdi with VFS */
struct exofs_sb_stats s_ess; /* Written often, pre-allocate*/
int s_timeout; /* timeout for OSD operations */
uint64_t s_nextid; /* highest object ID used */
@@ -84,16 +65,13 @@ struct exofs_sb_info {
spinlock_t s_next_gen_lock; /* spinlock for gen # update */
u32 s_next_generation; /* next gen # to use */
atomic_t s_curr_pending; /* number of pending commands */
- uint8_t s_cred[OSD_CAP_LEN]; /* credential for the fscb */
- struct backing_dev_info bdi; /* register our bdi with VFS */
struct pnfs_osd_data_map data_map; /* Default raid to use
* FIXME: Needed ?
*/
-/* struct exofs_layout dir_layout;*/ /* Default dir layout */
- struct exofs_layout layout; /* Default files layout,
- * contains the variable osd_dev
- * array. Keep last */
+ struct ore_layout layout; /* Default files layout */
+ struct ore_comp one_comp; /* id & cred of partition id=0*/
+ struct ore_components comps; /* comps for the partition */
struct osd_dev *_min_one_dev[1]; /* Place holder for one dev */
};
@@ -107,7 +85,8 @@ struct exofs_i_info {
uint32_t i_data[EXOFS_IDATA];/*short symlink names and device #s*/
uint32_t i_dir_start_lookup; /* which page to start lookup */
uint64_t i_commit_size; /* the object's written length */
- uint8_t i_cred[OSD_CAP_LEN];/* all-powerful credential */
+ struct ore_comp one_comp; /* same component for all devices */
+ struct ore_components comps; /* inode view of the device table */
};
static inline osd_id exofs_oi_objno(struct exofs_i_info *oi)
@@ -115,52 +94,6 @@ static inline osd_id exofs_oi_objno(struct exofs_i_info *oi)
return oi->vfs_inode.i_ino + EXOFS_OBJ_OFF;
}
-struct exofs_io_state;
-typedef void (*exofs_io_done_fn)(struct exofs_io_state *or, void *private);
-
-struct exofs_io_state {
- struct kref kref;
-
- void *private;
- exofs_io_done_fn done;
-
- struct exofs_layout *layout;
- struct osd_obj_id obj;
- u8 *cred;
-
- /* Global read/write IO*/
- loff_t offset;
- unsigned long length;
- void *kern_buff;
-
- struct page **pages;
- unsigned nr_pages;
- unsigned pgbase;
- unsigned pages_consumed;
-
- /* Attributes */
- unsigned in_attr_len;
- struct osd_attr *in_attr;
- unsigned out_attr_len;
- struct osd_attr *out_attr;
-
- /* Variable array of size numdevs */
- unsigned numdevs;
- struct exofs_per_dev_state {
- struct osd_request *or;
- struct bio *bio;
- loff_t offset;
- unsigned length;
- unsigned dev;
- } per_dev[];
-};
-
-static inline unsigned exofs_io_state_size(unsigned numdevs)
-{
- return sizeof(struct exofs_io_state) +
- sizeof(struct exofs_per_dev_state) * numdevs;
-}
-
/*
* our inode flags
*/
@@ -205,12 +138,6 @@ static inline struct exofs_i_info *exofs_i(struct inode *inode)
}
/*
- * Given a layout, object_number and stripe_index return the associated global
- * dev_index
- */
-unsigned exofs_layout_od_id(struct exofs_layout *layout,
- osd_id obj_no, unsigned layout_index);
-/*
* Maximum count of links to a file
*/
#define EXOFS_LINK_MAX 32000
@@ -219,44 +146,8 @@ unsigned exofs_layout_od_id(struct exofs_layout *layout,
* function declarations *
*************************/
-/* ios.c */
-void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
- const struct osd_obj_id *obj);
-int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
- u64 offset, void *p, unsigned length);
-
-int exofs_get_io_state(struct exofs_layout *layout,
- struct exofs_io_state **ios);
-void exofs_put_io_state(struct exofs_io_state *ios);
-
-int exofs_check_io(struct exofs_io_state *ios, u64 *resid);
-
-int exofs_sbi_create(struct exofs_io_state *ios);
-int exofs_sbi_remove(struct exofs_io_state *ios);
-int exofs_sbi_write(struct exofs_io_state *ios);
-int exofs_sbi_read(struct exofs_io_state *ios);
-
-int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr);
-
-int exofs_oi_truncate(struct exofs_i_info *oi, u64 new_len);
-static inline int exofs_oi_write(struct exofs_i_info *oi,
- struct exofs_io_state *ios)
-{
- ios->obj.id = exofs_oi_objno(oi);
- ios->cred = oi->i_cred;
- return exofs_sbi_write(ios);
-}
-
-static inline int exofs_oi_read(struct exofs_i_info *oi,
- struct exofs_io_state *ios)
-{
- ios->obj.id = exofs_oi_objno(oi);
- ios->cred = oi->i_cred;
- return exofs_sbi_read(ios);
-}
-
/* inode.c */
-unsigned exofs_max_io_pages(struct exofs_layout *layout,
+unsigned exofs_max_io_pages(struct ore_layout *layout,
unsigned expected_pages);
int exofs_setattr(struct dentry *, struct iattr *);
int exofs_write_begin(struct file *file, struct address_space *mapping,
@@ -281,6 +172,8 @@ int exofs_set_link(struct inode *, struct exofs_dir_entry *, struct page *,
struct inode *);
/* super.c */
+void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
+ const struct osd_obj_id *obj);
int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
/*********************
@@ -295,7 +188,6 @@ extern const struct file_operations exofs_file_operations;
/* inode.c */
extern const struct address_space_operations exofs_aops;
-extern const struct osd_attr g_attr_logical_length;
/* namei.c */
extern const struct inode_operations exofs_dir_inode_operations;
@@ -305,4 +197,33 @@ extern const struct inode_operations exofs_special_inode_operations;
extern const struct inode_operations exofs_symlink_inode_operations;
extern const struct inode_operations exofs_fast_symlink_inode_operations;
+/* exofs_init_comps initializes an ore_components device array pointing
+ * to a single ore_comp struct, and a round-robin view of the device
+ * table.
+ * The first device of each inode is [inode->ino % num_devices], and the
+ * rest of the devices follow it sequentially, with the first device
+ * coming again after the last.
+ * It is assumed that the global device array at @sbi is twice as big
+ * and that the device table repeats twice.
+ * See: exofs_read_lookup_dev_table()
+ */
+static inline void exofs_init_comps(struct ore_components *comps,
+ struct ore_comp *one_comp,
+ struct exofs_sb_info *sbi, osd_id oid)
+{
+ unsigned dev_mod = (unsigned)oid, first_dev;
+
+ one_comp->obj.partition = sbi->one_comp.obj.partition;
+ one_comp->obj.id = oid;
+ exofs_make_credential(one_comp->cred, &one_comp->obj);
+
+ comps->numdevs = sbi->comps.numdevs;
+ comps->single_comp = EC_SINGLE_COMP;
+ comps->comps = one_comp;
+
+ /* Round robin device view of the table */
+ first_dev = (dev_mod * sbi->layout.mirrors_p1) % sbi->comps.numdevs;
+ comps->ods = sbi->comps.ods + first_dev;
+}
+
#endif
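
A worked example of the round-robin view that exofs_init_comps() sets up; the layout numbers below are illustrative assumptions, not values from this patch:

	/* Assume mirrors_p1 = 1 and comps->numdevs = 4, with the global table
	 * holding the four devices twice: D0 D1 D2 D3 D0 D1 D2 D3.
	 * For an object id of 6:
	 *	first_dev = (6 * 1) % 4 = 2
	 * so comps->ods points at D2 and the inode sees D2 D3 D0 D1, taken
	 * sequentially from the doubled table with no modular wrap needed. */
	first_dev = (dev_mod * sbi->layout.mirrors_p1) % sbi->comps.numdevs;
	comps->ods = sbi->comps.ods + first_dev;
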
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 8472c098445..f39a38fc234 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -43,7 +43,7 @@ enum { BIO_MAX_PAGES_KMALLOC =
PAGE_SIZE / sizeof(struct page *),
};
-unsigned exofs_max_io_pages(struct exofs_layout *layout,
+unsigned exofs_max_io_pages(struct ore_layout *layout,
unsigned expected_pages)
{
unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
@@ -58,7 +58,7 @@ struct page_collect {
struct exofs_sb_info *sbi;
struct inode *inode;
unsigned expected_pages;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
struct page **pages;
unsigned alloc_pages;
@@ -110,13 +110,6 @@ static int pcol_try_alloc(struct page_collect *pcol)
{
unsigned pages;
- if (!pcol->ios) { /* First time allocate io_state */
- int ret = exofs_get_io_state(&pcol->sbi->layout, &pcol->ios);
-
- if (ret)
- return ret;
- }
-
/* TODO: easily support bio chaining */
pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);
@@ -140,7 +133,7 @@ static void pcol_free(struct page_collect *pcol)
pcol->pages = NULL;
if (pcol->ios) {
- exofs_put_io_state(pcol->ios);
+ ore_put_io_state(pcol->ios);
pcol->ios = NULL;
}
}
@@ -200,7 +193,7 @@ static int __readpages_done(struct page_collect *pcol)
u64 resid;
u64 good_bytes;
u64 length = 0;
- int ret = exofs_check_io(pcol->ios, &resid);
+ int ret = ore_check_io(pcol->ios, &resid);
if (likely(!ret))
good_bytes = pcol->length;
@@ -241,7 +234,7 @@ static int __readpages_done(struct page_collect *pcol)
}
/* callback of async reads */
-static void readpages_done(struct exofs_io_state *ios, void *p)
+static void readpages_done(struct ore_io_state *ios, void *p)
{
struct page_collect *pcol = p;
@@ -269,20 +262,28 @@ static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
static int read_exec(struct page_collect *pcol)
{
struct exofs_i_info *oi = exofs_i(pcol->inode);
- struct exofs_io_state *ios = pcol->ios;
+ struct ore_io_state *ios;
struct page_collect *pcol_copy = NULL;
int ret;
if (!pcol->pages)
return 0;
+ if (!pcol->ios) {
+ int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->comps, true,
+ pcol->pg_first << PAGE_CACHE_SHIFT,
+ pcol->length, &pcol->ios);
+
+ if (ret)
+ return ret;
+ }
+
+ ios = pcol->ios;
ios->pages = pcol->pages;
ios->nr_pages = pcol->nr_pages;
- ios->length = pcol->length;
- ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT;
if (pcol->read_4_write) {
- exofs_oi_read(oi, pcol->ios);
+ ore_read(pcol->ios);
return __readpages_done(pcol);
}
@@ -295,14 +296,14 @@ static int read_exec(struct page_collect *pcol)
*pcol_copy = *pcol;
ios->done = readpages_done;
ios->private = pcol_copy;
- ret = exofs_oi_read(oi, ios);
+ ret = ore_read(ios);
if (unlikely(ret))
goto err;
atomic_inc(&pcol->sbi->s_curr_pending);
EXOFS_DBGMSG2("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
- ios->obj.id, _LLU(ios->offset), pcol->length);
+ oi->one_comp.obj.id, _LLU(ios->offset), pcol->length);
/* pages ownership was passed to pcol_copy */
_pcol_reset(pcol);
@@ -457,14 +458,14 @@ static int exofs_readpage(struct file *file, struct page *page)
}
/* Callback for osd_write. All writes are asynchronous */
-static void writepages_done(struct exofs_io_state *ios, void *p)
+static void writepages_done(struct ore_io_state *ios, void *p)
{
struct page_collect *pcol = p;
int i;
u64 resid;
u64 good_bytes;
u64 length = 0;
- int ret = exofs_check_io(ios, &resid);
+ int ret = ore_check_io(ios, &resid);
atomic_dec(&pcol->sbi->s_curr_pending);
@@ -507,13 +508,21 @@ static void writepages_done(struct exofs_io_state *ios, void *p)
static int write_exec(struct page_collect *pcol)
{
struct exofs_i_info *oi = exofs_i(pcol->inode);
- struct exofs_io_state *ios = pcol->ios;
+ struct ore_io_state *ios;
struct page_collect *pcol_copy = NULL;
int ret;
if (!pcol->pages)
return 0;
+ BUG_ON(pcol->ios);
+ ret = ore_get_rw_state(&pcol->sbi->layout, &oi->comps, false,
+ pcol->pg_first << PAGE_CACHE_SHIFT,
+ pcol->length, &pcol->ios);
+
+ if (unlikely(ret))
+ goto err;
+
pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
if (!pcol_copy) {
EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
@@ -523,16 +532,15 @@ static int write_exec(struct page_collect *pcol)
*pcol_copy = *pcol;
+ ios = pcol->ios;
ios->pages = pcol_copy->pages;
ios->nr_pages = pcol_copy->nr_pages;
- ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT;
- ios->length = pcol_copy->length;
ios->done = writepages_done;
ios->private = pcol_copy;
- ret = exofs_oi_write(oi, ios);
+ ret = ore_write(ios);
if (unlikely(ret)) {
- EXOFS_ERR("write_exec: exofs_oi_write() Failed\n");
+ EXOFS_ERR("write_exec: ore_write() Failed\n");
goto err;
}
@@ -844,17 +852,15 @@ static inline int exofs_inode_is_fast_symlink(struct inode *inode)
return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}
-const struct osd_attr g_attr_logical_length = ATTR_DEF(
- OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
-
static int _do_truncate(struct inode *inode, loff_t newsize)
{
struct exofs_i_info *oi = exofs_i(inode);
+ struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
int ret;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- ret = exofs_oi_truncate(oi, (u64)newsize);
+ ret = ore_truncate(&sbi->layout, &oi->comps, (u64)newsize);
if (likely(!ret))
truncate_setsize(inode, newsize);
@@ -917,30 +923,26 @@ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
[1] = g_attr_inode_file_layout,
[2] = g_attr_inode_dir_layout,
};
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
struct exofs_on_disk_inode_layout *layout;
int ret;
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
return ret;
}
- ios->obj.id = exofs_oi_objno(oi);
- exofs_make_credential(oi->i_cred, &ios->obj);
- ios->cred = oi->i_cred;
-
- attrs[1].len = exofs_on_disk_inode_layout_size(sbi->layout.s_numdevs);
- attrs[2].len = exofs_on_disk_inode_layout_size(sbi->layout.s_numdevs);
+ attrs[1].len = exofs_on_disk_inode_layout_size(sbi->comps.numdevs);
+ attrs[2].len = exofs_on_disk_inode_layout_size(sbi->comps.numdevs);
ios->in_attr = attrs;
ios->in_attr_len = ARRAY_SIZE(attrs);
- ret = exofs_sbi_read(ios);
+ ret = ore_read(ios);
if (unlikely(ret)) {
EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
- _LLU(ios->obj.id), ret);
+ _LLU(oi->one_comp.obj.id), ret);
memset(inode, 0, sizeof(*inode));
inode->i_mode = 0040000 | (0777 & ~022);
/* If object is lost on target we might as well enable it's
@@ -990,7 +992,7 @@ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
}
out:
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
return ret;
}
@@ -1016,6 +1018,8 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
return inode;
oi = exofs_i(inode);
__oi_init(oi);
+ exofs_init_comps(&oi->comps, &oi->one_comp, sb->s_fs_info,
+ exofs_oi_objno(oi));
/* read the inode from the osd */
ret = exofs_get_inode(sb, oi, &fcb);
@@ -1107,21 +1111,22 @@ int __exofs_wait_obj_created(struct exofs_i_info *oi)
* set the obj_created flag so that other methods know that the object exists on
* the OSD.
*/
-static void create_done(struct exofs_io_state *ios, void *p)
+static void create_done(struct ore_io_state *ios, void *p)
{
struct inode *inode = p;
struct exofs_i_info *oi = exofs_i(inode);
struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
int ret;
- ret = exofs_check_io(ios, NULL);
- exofs_put_io_state(ios);
+ ret = ore_check_io(ios, NULL);
+ ore_put_io_state(ios);
atomic_dec(&sbi->s_curr_pending);
if (unlikely(ret)) {
EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
- _LLU(exofs_oi_objno(oi)), _LLU(sbi->layout.s_pid));
+ _LLU(exofs_oi_objno(oi)),
+ _LLU(oi->one_comp.obj.partition));
/*TODO: When FS is corrupted creation can fail, object already
* exist. Get rid of this asynchronous creation, if exist
* increment the obj counter and try the next object. Until we
@@ -1140,14 +1145,13 @@ static void create_done(struct exofs_io_state *ios, void *p)
*/
struct inode *exofs_new_inode(struct inode *dir, int mode)
{
- struct super_block *sb;
+ struct super_block *sb = dir->i_sb;
+ struct exofs_sb_info *sbi = sb->s_fs_info;
struct inode *inode;
struct exofs_i_info *oi;
- struct exofs_sb_info *sbi;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
int ret;
- sb = dir->i_sb;
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -1157,8 +1161,6 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
set_obj_2bcreated(oi);
- sbi = sb->s_fs_info;
-
inode->i_mapping->backing_dev_info = sb->s_bdi;
inode_init_owner(inode, dir, mode);
inode->i_ino = sbi->s_nextid++;
@@ -1170,25 +1172,24 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
spin_unlock(&sbi->s_next_gen_lock);
insert_inode_hash(inode);
+ exofs_init_comps(&oi->comps, &oi->one_comp, sb->s_fs_info,
+ exofs_oi_objno(oi));
exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */
mark_inode_dirty(inode);
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n");
+ EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
return ERR_PTR(ret);
}
- ios->obj.id = exofs_oi_objno(oi);
- exofs_make_credential(oi->i_cred, &ios->obj);
-
ios->done = create_done;
ios->private = inode;
- ios->cred = oi->i_cred;
- ret = exofs_sbi_create(ios);
+
+ ret = ore_create(ios);
if (ret) {
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
return ERR_PTR(ret);
}
atomic_inc(&sbi->s_curr_pending);
@@ -1207,11 +1208,11 @@ struct updatei_args {
/*
* Callback function from exofs_update_inode().
*/
-static void updatei_done(struct exofs_io_state *ios, void *p)
+static void updatei_done(struct ore_io_state *ios, void *p)
{
struct updatei_args *args = p;
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
atomic_dec(&args->sbi->s_curr_pending);
@@ -1227,7 +1228,7 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
struct exofs_i_info *oi = exofs_i(inode);
struct super_block *sb = inode->i_sb;
struct exofs_sb_info *sbi = sb->s_fs_info;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
struct osd_attr attr;
struct exofs_fcb *fcb;
struct updatei_args *args;
@@ -1266,9 +1267,9 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
} else
memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
goto free_args;
}
@@ -1285,13 +1286,13 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
ios->private = args;
}
- ret = exofs_oi_write(oi, ios);
+ ret = ore_write(ios);
if (!do_sync && !ret) {
atomic_inc(&sbi->s_curr_pending);
goto out; /* deallocation in updatei_done */
}
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
free_args:
kfree(args);
out:
@@ -1310,11 +1311,11 @@ int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
* Callback function from exofs_delete_inode() - don't have much cleaning up to
* do.
*/
-static void delete_done(struct exofs_io_state *ios, void *p)
+static void delete_done(struct ore_io_state *ios, void *p)
{
struct exofs_sb_info *sbi = p;
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
atomic_dec(&sbi->s_curr_pending);
}
@@ -1329,7 +1330,7 @@ void exofs_evict_inode(struct inode *inode)
struct exofs_i_info *oi = exofs_i(inode);
struct super_block *sb = inode->i_sb;
struct exofs_sb_info *sbi = sb->s_fs_info;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
int ret;
truncate_inode_pages(&inode->i_data, 0);
@@ -1349,20 +1350,19 @@ void exofs_evict_inode(struct inode *inode)
/* ignore the error, attempt a remove anyway */
/* Now Remove the OSD objects */
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &oi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__);
+ EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
return;
}
- ios->obj.id = exofs_oi_objno(oi);
ios->done = delete_done;
ios->private = sbi;
- ios->cred = oi->i_cred;
- ret = exofs_sbi_remove(ios);
+
+ ret = ore_remove(ios);
if (ret) {
- EXOFS_ERR("%s: exofs_sbi_remove failed\n", __func__);
- exofs_put_io_state(ios);
+ EXOFS_ERR("%s: ore_remove failed\n", __func__);
+ ore_put_io_state(ios);
return;
}
atomic_inc(&sbi->s_curr_pending);
diff --git a/fs/exofs/ios.c b/fs/exofs/ore.c
index f74a2ec027a..25305af8819 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ore.c
@@ -23,81 +23,87 @@
*/
#include <linux/slab.h>
-#include <scsi/scsi_device.h>
#include <asm/div64.h>
-#include "exofs.h"
+#include <scsi/osd_ore.h>
-#define EXOFS_DBGMSG2(M...) do {} while (0)
-/* #define EXOFS_DBGMSG2 EXOFS_DBGMSG */
+#define ORE_ERR(fmt, a...) printk(KERN_ERR "ore: " fmt, ##a)
-void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
-{
- osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
-}
+#ifdef CONFIG_EXOFS_DEBUG
+#define ORE_DBGMSG(fmt, a...) \
+ printk(KERN_NOTICE "ore @%s:%d: " fmt, __func__, __LINE__, ##a)
+#else
+#define ORE_DBGMSG(fmt, a...) \
+ do { if (0) printk(fmt, ##a); } while (0)
+#endif
-int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
- u64 offset, void *p, unsigned length)
-{
- struct osd_request *or = osd_start_request(od, GFP_KERNEL);
-/* struct osd_sense_info osi = {.key = 0};*/
- int ret;
+/* u64 has problems with printk this will cast it to unsigned long long */
+#define _LLU(x) (unsigned long long)(x)
- if (unlikely(!or)) {
- EXOFS_DBGMSG("%s: osd_start_request failed.\n", __func__);
- return -ENOMEM;
- }
- ret = osd_req_read_kern(or, obj, offset, p, length);
- if (unlikely(ret)) {
- EXOFS_DBGMSG("%s: osd_req_read_kern failed.\n", __func__);
- goto out;
- }
+#define ORE_DBGMSG2(M...) do {} while (0)
+/* #define ORE_DBGMSG2 ORE_DBGMSG */
- ret = osd_finalize_request(or, 0, cred, NULL);
- if (unlikely(ret)) {
- EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
- goto out;
- }
+MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
+MODULE_DESCRIPTION("Objects Raid Engine ore.ko");
+MODULE_LICENSE("GPL");
- ret = osd_execute_request(or);
- if (unlikely(ret))
- EXOFS_DBGMSG("osd_execute_request() => %d\n", ret);
- /* osd_req_decode_sense(or, ret); */
+static u8 *_ios_cred(struct ore_io_state *ios, unsigned index)
+{
+ return ios->comps->comps[index & ios->comps->single_comp].cred;
+}
-out:
- osd_end_request(or);
- return ret;
+static struct osd_obj_id *_ios_obj(struct ore_io_state *ios, unsigned index)
+{
+ return &ios->comps->comps[index & ios->comps->single_comp].obj;
}
-int exofs_get_io_state(struct exofs_layout *layout,
- struct exofs_io_state **pios)
+static struct osd_dev *_ios_od(struct ore_io_state *ios, unsigned index)
{
- struct exofs_io_state *ios;
+ return ios->comps->ods[index];
+}
+
+int ore_get_rw_state(struct ore_layout *layout, struct ore_components *comps,
+ bool is_reading, u64 offset, u64 length,
+ struct ore_io_state **pios)
+{
+ struct ore_io_state *ios;
/*TODO: Maybe use kmem_cach per sbi of size
* exofs_io_state_size(layout->s_numdevs)
*/
- ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
+ ios = kzalloc(ore_io_state_size(comps->numdevs), GFP_KERNEL);
if (unlikely(!ios)) {
- EXOFS_DBGMSG("Failed kzalloc bytes=%d\n",
- exofs_io_state_size(layout->s_numdevs));
+ ORE_DBGMSG("Failed kzalloc bytes=%d\n",
+ ore_io_state_size(comps->numdevs));
*pios = NULL;
return -ENOMEM;
}
ios->layout = layout;
- ios->obj.partition = layout->s_pid;
+ ios->comps = comps;
+ ios->offset = offset;
+ ios->length = length;
+ ios->reading = is_reading;
+
*pios = ios;
return 0;
}
+EXPORT_SYMBOL(ore_get_rw_state);
+
+int ore_get_io_state(struct ore_layout *layout, struct ore_components *comps,
+ struct ore_io_state **ios)
+{
+ return ore_get_rw_state(layout, comps, true, 0, 0, ios);
+}
+EXPORT_SYMBOL(ore_get_io_state);
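
A hedged usage sketch of the new ore entry points, condensed from the read_exec() changes earlier in this patch (error handling and page setup trimmed):

	struct ore_io_state *ios;
	int ret;

	/* Describe the I/O: layout, components, direction and byte range. */
	ret = ore_get_rw_state(&sbi->layout, &oi->comps, true /* read */,
			       offset, length, &ios);
	if (ret)
		return ret;

	ios->pages = pages;		/* data pages to read into */
	ios->nr_pages = nr_pages;
	ios->done = readpages_done;	/* NULL would make ore_read() synchronous */
	ios->private = pcol_copy;

	ret = ore_read(ios);		/* ore_put_io_state() runs from the done path */
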
-void exofs_put_io_state(struct exofs_io_state *ios)
+void ore_put_io_state(struct ore_io_state *ios)
{
if (ios) {
unsigned i;
for (i = 0; i < ios->numdevs; i++) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[i];
+ struct ore_per_dev_state *per_dev = &ios->per_dev[i];
if (per_dev->or)
osd_end_request(per_dev->or);
@@ -108,31 +114,9 @@ void exofs_put_io_state(struct exofs_io_state *ios)
kfree(ios);
}
}
+EXPORT_SYMBOL(ore_put_io_state);
-unsigned exofs_layout_od_id(struct exofs_layout *layout,
- osd_id obj_no, unsigned layout_index)
-{
-/* switch (layout->lay_func) {
- case LAYOUT_MOVING_WINDOW:
- {*/
- unsigned dev_mod = obj_no;
-
- return (layout_index + dev_mod * layout->mirrors_p1) %
- layout->s_numdevs;
-/* }
- case LAYOUT_FUNC_IMPLICT:
- return layout->devs[layout_index];
- }*/
-}
-
-static inline struct osd_dev *exofs_ios_od(struct exofs_io_state *ios,
- unsigned layout_index)
-{
- return ios->layout->s_ods[
- exofs_layout_od_id(ios->layout, ios->obj.id, layout_index)];
-}
-
-static void _sync_done(struct exofs_io_state *ios, void *p)
+static void _sync_done(struct ore_io_state *ios, void *p)
{
struct completion *waiting = p;
@@ -141,20 +125,20 @@ static void _sync_done(struct exofs_io_state *ios, void *p)
static void _last_io(struct kref *kref)
{
- struct exofs_io_state *ios = container_of(
- kref, struct exofs_io_state, kref);
+ struct ore_io_state *ios = container_of(
+ kref, struct ore_io_state, kref);
ios->done(ios, ios->private);
}
static void _done_io(struct osd_request *or, void *p)
{
- struct exofs_io_state *ios = p;
+ struct ore_io_state *ios = p;
kref_put(&ios->kref, _last_io);
}
-static int exofs_io_execute(struct exofs_io_state *ios)
+static int ore_io_execute(struct ore_io_state *ios)
{
DECLARE_COMPLETION_ONSTACK(wait);
bool sync = (ios->done == NULL);
@@ -170,9 +154,9 @@ static int exofs_io_execute(struct exofs_io_state *ios)
if (unlikely(!or))
continue;
- ret = osd_finalize_request(or, 0, ios->cred, NULL);
+ ret = osd_finalize_request(or, 0, _ios_cred(ios, i), NULL);
if (unlikely(ret)) {
- EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n",
+ ORE_DBGMSG("Failed to osd_finalize_request() => %d\n",
ret);
return ret;
}
@@ -194,7 +178,7 @@ static int exofs_io_execute(struct exofs_io_state *ios)
if (sync) {
wait_for_completion(&wait);
- ret = exofs_check_io(ios, NULL);
+ ret = ore_check_io(ios, NULL);
}
return ret;
}
@@ -214,7 +198,7 @@ static void _clear_bio(struct bio *bio)
}
}
-int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
+int ore_check_io(struct ore_io_state *ios, u64 *resid)
{
enum osd_err_priority acumulated_osd_err = 0;
int acumulated_lin_err = 0;
@@ -235,7 +219,7 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
/* start read offset passed endof file */
_clear_bio(ios->per_dev[i].bio);
- EXOFS_DBGMSG("start read offset passed end of file "
+ ORE_DBGMSG("start read offset passed end of file "
"offset=0x%llx, length=0x%llx\n",
_LLU(ios->per_dev[i].offset),
_LLU(ios->per_dev[i].length));
@@ -259,6 +243,7 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
return acumulated_lin_err;
}
+EXPORT_SYMBOL(ore_check_io);
/*
* L - logical offset into the file
@@ -305,20 +290,21 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
struct _striping_info {
u64 obj_offset;
u64 group_length;
+ u64 M; /* for truncate */
unsigned dev;
unsigned unit_off;
};
-static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
+static void _calc_stripe_info(struct ore_layout *layout, u64 file_offset,
struct _striping_info *si)
{
- u32 stripe_unit = ios->layout->stripe_unit;
- u32 group_width = ios->layout->group_width;
- u64 group_depth = ios->layout->group_depth;
+ u32 stripe_unit = layout->stripe_unit;
+ u32 group_width = layout->group_width;
+ u64 group_depth = layout->group_depth;
u32 U = stripe_unit * group_width;
u64 T = U * group_depth;
- u64 S = T * ios->layout->group_count;
+ u64 S = T * layout->group_count;
u64 M = div64_u64(file_offset, S);
/*
@@ -333,7 +319,7 @@ static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
/* "H - (N * U)" is just "H % U" so it's bound to u32 */
si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
- si->dev *= ios->layout->mirrors_p1;
+ si->dev *= layout->mirrors_p1;
div_u64_rem(file_offset, stripe_unit, &si->unit_off);
@@ -341,15 +327,16 @@ static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
(M * group_depth * stripe_unit);
si->group_length = T - H;
+ si->M = M;
}
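
A small arithmetic example of the striping quantities computed above, under assumed layout values (illustrative only):

	/* Assume stripe_unit = 64K, group_width = 4, group_depth = 2,
	 * group_count = 3:
	 *	U = 64K * 4	= 256K	(one full stripe)
	 *	T = 256K * 2	= 512K	(one group)
	 *	S = 512K * 3	= 1536K	(one cycle over all groups)
	 * For file_offset = 1600K:
	 *	M = 1600K / 1536K = 1	(second cycle; also kept in si->M
	 *				 for the truncate path)		*/
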
-static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
- unsigned pgbase, struct exofs_per_dev_state *per_dev,
+static int _add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
+ unsigned pgbase, struct ore_per_dev_state *per_dev,
int cur_len)
{
unsigned pg = *cur_pg;
struct request_queue *q =
- osd_request_queue(exofs_ios_od(ios, per_dev->dev));
+ osd_request_queue(_ios_od(ios, per_dev->dev));
per_dev->length += cur_len;
@@ -361,7 +348,7 @@ static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
if (unlikely(!per_dev->bio)) {
- EXOFS_DBGMSG("Failed to allocate BIO size=%u\n",
+ ORE_DBGMSG("Failed to allocate BIO size=%u\n",
bio_size);
return -ENOMEM;
}
@@ -387,7 +374,7 @@ static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
return 0;
}
-static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
+static int _prepare_one_group(struct ore_io_state *ios, u64 length,
struct _striping_info *si)
{
unsigned stripe_unit = ios->layout->stripe_unit;
@@ -400,7 +387,7 @@ static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
int ret = 0;
while (length) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[dev];
+ struct ore_per_dev_state *per_dev = &ios->per_dev[dev];
unsigned cur_len, page_off = 0;
if (!per_dev->length) {
@@ -443,7 +430,7 @@ out:
return ret;
}
-static int _prepare_for_striping(struct exofs_io_state *ios)
+static int _prepare_for_striping(struct ore_io_state *ios)
{
u64 length = ios->length;
u64 offset = ios->offset;
@@ -452,9 +439,9 @@ static int _prepare_for_striping(struct exofs_io_state *ios)
if (!ios->pages) {
if (ios->kern_buff) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[0];
+ struct ore_per_dev_state *per_dev = &ios->per_dev[0];
- _calc_stripe_info(ios, ios->offset, &si);
+ _calc_stripe_info(ios->layout, ios->offset, &si);
per_dev->offset = si.obj_offset;
per_dev->dev = si.dev;
@@ -468,7 +455,7 @@ static int _prepare_for_striping(struct exofs_io_state *ios)
}
while (length) {
- _calc_stripe_info(ios, offset, &si);
+ _calc_stripe_info(ios->layout, offset, &si);
if (length < si.group_length)
si.group_length = length;
@@ -485,57 +472,59 @@ out:
return ret;
}
-int exofs_sbi_create(struct exofs_io_state *ios)
+int ore_create(struct ore_io_state *ios)
{
int i, ret;
- for (i = 0; i < ios->layout->s_numdevs; i++) {
+ for (i = 0; i < ios->comps->numdevs; i++) {
struct osd_request *or;
- or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
+ or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ ORE_ERR("%s: osd_start_request failed\n", __func__);
ret = -ENOMEM;
goto out;
}
ios->per_dev[i].or = or;
ios->numdevs++;
- osd_req_create_object(or, &ios->obj);
+ osd_req_create_object(or, _ios_obj(ios, i));
}
- ret = exofs_io_execute(ios);
+ ret = ore_io_execute(ios);
out:
return ret;
}
+EXPORT_SYMBOL(ore_create);
-int exofs_sbi_remove(struct exofs_io_state *ios)
+int ore_remove(struct ore_io_state *ios)
{
int i, ret;
- for (i = 0; i < ios->layout->s_numdevs; i++) {
+ for (i = 0; i < ios->comps->numdevs; i++) {
struct osd_request *or;
- or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
+ or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ ORE_ERR("%s: osd_start_request failed\n", __func__);
ret = -ENOMEM;
goto out;
}
ios->per_dev[i].or = or;
ios->numdevs++;
- osd_req_remove_object(or, &ios->obj);
+ osd_req_remove_object(or, _ios_obj(ios, i));
}
- ret = exofs_io_execute(ios);
+ ret = ore_io_execute(ios);
out:
return ret;
}
+EXPORT_SYMBOL(ore_remove);
-static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
+static int _write_mirror(struct ore_io_state *ios, int cur_comp)
{
- struct exofs_per_dev_state *master_dev = &ios->per_dev[cur_comp];
+ struct ore_per_dev_state *master_dev = &ios->per_dev[cur_comp];
unsigned dev = ios->per_dev[cur_comp].dev;
unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
int ret = 0;
@@ -544,12 +533,12 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
return 0; /* Just an empty slot */
for (; cur_comp < last_comp; ++cur_comp, ++dev) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
+ struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
struct osd_request *or;
- or = osd_start_request(exofs_ios_od(ios, dev), GFP_KERNEL);
+ or = osd_start_request(_ios_od(ios, dev), GFP_KERNEL);
if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ ORE_ERR("%s: osd_start_request failed\n", __func__);
ret = -ENOMEM;
goto out;
}
@@ -563,7 +552,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
bio = bio_kmalloc(GFP_KERNEL,
master_dev->bio->bi_max_vecs);
if (unlikely(!bio)) {
- EXOFS_DBGMSG(
+ ORE_DBGMSG(
"Failed to allocate BIO size=%u\n",
master_dev->bio->bi_max_vecs);
ret = -ENOMEM;
@@ -582,25 +571,29 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
bio->bi_rw |= REQ_WRITE;
}
- osd_req_write(or, &ios->obj, per_dev->offset, bio,
- per_dev->length);
- EXOFS_DBGMSG("write(0x%llx) offset=0x%llx "
+ osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
+ bio, per_dev->length);
+ ORE_DBGMSG("write(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(ios->obj.id), _LLU(per_dev->offset),
+ _LLU(_ios_obj(ios, dev)->id),
+ _LLU(per_dev->offset),
_LLU(per_dev->length), dev);
} else if (ios->kern_buff) {
- ret = osd_req_write_kern(or, &ios->obj, per_dev->offset,
- ios->kern_buff, ios->length);
+ ret = osd_req_write_kern(or, _ios_obj(ios, dev),
+ per_dev->offset,
+ ios->kern_buff, ios->length);
if (unlikely(ret))
goto out;
- EXOFS_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
+ ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(ios->obj.id), _LLU(per_dev->offset),
+ _LLU(_ios_obj(ios, dev)->id),
+ _LLU(per_dev->offset),
_LLU(ios->length), dev);
} else {
- osd_req_set_attributes(or, &ios->obj);
- EXOFS_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
- _LLU(ios->obj.id), ios->out_attr_len, dev);
+ osd_req_set_attributes(or, _ios_obj(ios, dev));
+ ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
+ _LLU(_ios_obj(ios, dev)->id),
+ ios->out_attr_len, dev);
}
if (ios->out_attr)
@@ -616,7 +609,7 @@ out:
return ret;
}
-int exofs_sbi_write(struct exofs_io_state *ios)
+int ore_write(struct ore_io_state *ios)
{
int i;
int ret;
@@ -626,52 +619,55 @@ int exofs_sbi_write(struct exofs_io_state *ios)
return ret;
for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
- ret = _sbi_write_mirror(ios, i);
+ ret = _write_mirror(ios, i);
if (unlikely(ret))
return ret;
}
- ret = exofs_io_execute(ios);
+ ret = ore_io_execute(ios);
return ret;
}
+EXPORT_SYMBOL(ore_write);
-static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
+static int _read_mirror(struct ore_io_state *ios, unsigned cur_comp)
{
struct osd_request *or;
- struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
- unsigned first_dev = (unsigned)ios->obj.id;
+ struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
+ struct osd_obj_id *obj = _ios_obj(ios, cur_comp);
+ unsigned first_dev = (unsigned)obj->id;
if (ios->pages && !per_dev->length)
return 0; /* Just an empty slot */
first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
- or = osd_start_request(exofs_ios_od(ios, first_dev), GFP_KERNEL);
+ or = osd_start_request(_ios_od(ios, first_dev), GFP_KERNEL);
if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ ORE_ERR("%s: osd_start_request failed\n", __func__);
return -ENOMEM;
}
per_dev->or = or;
if (ios->pages) {
- osd_req_read(or, &ios->obj, per_dev->offset,
+ osd_req_read(or, obj, per_dev->offset,
per_dev->bio, per_dev->length);
- EXOFS_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
- " dev=%d\n", _LLU(ios->obj.id),
+ ORE_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
+ " dev=%d\n", _LLU(obj->id),
_LLU(per_dev->offset), _LLU(per_dev->length),
first_dev);
} else if (ios->kern_buff) {
- int ret = osd_req_read_kern(or, &ios->obj, per_dev->offset,
+ int ret = osd_req_read_kern(or, obj, per_dev->offset,
ios->kern_buff, ios->length);
- EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
+ ORE_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d ret=>%d\n",
- _LLU(ios->obj.id), _LLU(per_dev->offset),
+ _LLU(obj->id), _LLU(per_dev->offset),
_LLU(ios->length), first_dev, ret);
if (unlikely(ret))
return ret;
} else {
- osd_req_get_attributes(or, &ios->obj);
- EXOFS_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
- _LLU(ios->obj.id), ios->in_attr_len, first_dev);
+ osd_req_get_attributes(or, obj);
+ ORE_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
+ _LLU(obj->id),
+ ios->in_attr_len, first_dev);
}
if (ios->out_attr)
osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);
@@ -682,7 +678,7 @@ static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
return 0;
}
-int exofs_sbi_read(struct exofs_io_state *ios)
+int ore_read(struct ore_io_state *ios)
{
int i;
int ret;
@@ -692,16 +688,17 @@ int exofs_sbi_read(struct exofs_io_state *ios)
return ret;
for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
- ret = _sbi_read_mirror(ios, i);
+ ret = _read_mirror(ios, i);
if (unlikely(ret))
return ret;
}
- ret = exofs_io_execute(ios);
+ ret = ore_io_execute(ios);
return ret;
}
+EXPORT_SYMBOL(ore_read);
-int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
+int extract_attr_from_ios(struct ore_io_state *ios, struct osd_attr *attr)
{
struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
void *iter = NULL;
@@ -721,83 +718,118 @@ int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
return -EIO;
}
+EXPORT_SYMBOL(extract_attr_from_ios);
-static int _truncate_mirrors(struct exofs_io_state *ios, unsigned cur_comp,
+static int _truncate_mirrors(struct ore_io_state *ios, unsigned cur_comp,
struct osd_attr *attr)
{
int last_comp = cur_comp + ios->layout->mirrors_p1;
for (; cur_comp < last_comp; ++cur_comp) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
+ struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
struct osd_request *or;
- or = osd_start_request(exofs_ios_od(ios, cur_comp), GFP_KERNEL);
+ or = osd_start_request(_ios_od(ios, cur_comp), GFP_KERNEL);
if (unlikely(!or)) {
- EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+ ORE_ERR("%s: osd_start_request failed\n", __func__);
return -ENOMEM;
}
per_dev->or = or;
- osd_req_set_attributes(or, &ios->obj);
+ osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
osd_req_add_set_attr_list(or, attr, 1);
}
return 0;
}
-int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
+struct _trunc_info {
+ struct _striping_info si;
+ u64 prev_group_obj_off;
+ u64 next_group_obj_off;
+
+ unsigned first_group_dev;
+ unsigned nex_group_dev;
+ unsigned max_devs;
+};
+
+void _calc_trunk_info(struct ore_layout *layout, u64 file_offset,
+ struct _trunc_info *ti)
+{
+ unsigned stripe_unit = layout->stripe_unit;
+
+ _calc_stripe_info(layout, file_offset, &ti->si);
+
+ ti->prev_group_obj_off = ti->si.M * stripe_unit;
+ ti->next_group_obj_off = ti->si.M ? (ti->si.M - 1) * stripe_unit : 0;
+
+ ti->first_group_dev = ti->si.dev - (ti->si.dev % layout->group_width);
+ ti->nex_group_dev = ti->first_group_dev + layout->group_width;
+ ti->max_devs = layout->group_width * layout->group_count;
+}
+
+int ore_truncate(struct ore_layout *layout, struct ore_components *comps,
+ u64 size)
{
- struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
struct exofs_trunc_attr {
struct osd_attr attr;
__be64 newsize;
} *size_attrs;
- struct _striping_info si;
+ struct _trunc_info ti;
int i, ret;
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(layout, comps, &ios);
if (unlikely(ret))
return ret;
- size_attrs = kcalloc(ios->layout->group_width, sizeof(*size_attrs),
+ _calc_trunk_info(ios->layout, size, &ti);
+
+ size_attrs = kcalloc(ti.max_devs, sizeof(*size_attrs),
GFP_KERNEL);
if (unlikely(!size_attrs)) {
ret = -ENOMEM;
goto out;
}
- ios->obj.id = exofs_oi_objno(oi);
- ios->cred = oi->i_cred;
+ ios->numdevs = ios->comps->numdevs;
- ios->numdevs = ios->layout->s_numdevs;
- _calc_stripe_info(ios, size, &si);
-
- for (i = 0; i < ios->layout->group_width; ++i) {
+ for (i = 0; i < ti.max_devs; ++i) {
struct exofs_trunc_attr *size_attr = &size_attrs[i];
u64 obj_size;
- if (i < si.dev)
- obj_size = si.obj_offset +
- ios->layout->stripe_unit - si.unit_off;
- else if (i == si.dev)
- obj_size = si.obj_offset;
- else /* i > si.dev */
- obj_size = si.obj_offset - si.unit_off;
+ if (i < ti.first_group_dev)
+ obj_size = ti.prev_group_obj_off;
+ else if (i >= ti.nex_group_dev)
+ obj_size = ti.next_group_obj_off;
+ else if (i < ti.si.dev) /* dev within this group */
+ obj_size = ti.si.obj_offset +
+ ios->layout->stripe_unit - ti.si.unit_off;
+ else if (i == ti.si.dev)
+ obj_size = ti.si.obj_offset;
+ else /* i > ti.si.dev */
+ obj_size = ti.si.obj_offset - ti.si.unit_off;
size_attr->newsize = cpu_to_be64(obj_size);
size_attr->attr = g_attr_logical_length;
size_attr->attr.val_ptr = &size_attr->newsize;
+ ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
+ _LLU(comps->comps->obj.id), _LLU(obj_size), i);
ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
&size_attr->attr);
if (unlikely(ret))
goto out;
}
- ret = exofs_io_execute(ios);
+ ret = ore_io_execute(ios);
out:
kfree(size_attrs);
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
return ret;
}
+EXPORT_SYMBOL(ore_truncate);
+
+const struct osd_attr g_attr_logical_length = ATTR_DEF(
+ OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
+EXPORT_SYMBOL(g_attr_logical_length);
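
The per-device sizes that ore_truncate() sets above reduce to a small selection rule: devices in earlier groups keep prev_group_obj_off, devices in later groups keep next_group_obj_off, and within the group holding the truncate point the size depends on whether a device sits before, at, or after the striping position. The standalone C sketch below replays only that selection; the si_* values are made-up inputs standing in for _calc_stripe_info() (which is outside this hunk), and a single-group, 4-way layout is assumed.

/* Toy replay of the per-device truncate sizes computed in ore_truncate().
 * All numbers are hypothetical; compile with any C99 compiler.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned group_width = 4, group_count = 1;	/* assumed layout */
	uint64_t stripe_unit = 64 * 1024;

	/* assumed output of the striping calculation for some file offset */
	unsigned si_dev = 2;			/* device holding the truncate point */
	uint64_t si_obj_offset = 0x30000;	/* offset inside that object */
	uint64_t si_unit_off = 0x1000;		/* offset inside the stripe unit */

	unsigned first_group_dev = si_dev - (si_dev % group_width);
	unsigned next_group_dev = first_group_dev + group_width;
	unsigned max_devs = group_width * group_count;

	for (unsigned i = 0; i < max_devs; i++) {
		uint64_t obj_size;

		if (i < first_group_dev)
			obj_size = 0;	/* prev_group_obj_off: 0, single group */
		else if (i >= next_group_dev)
			obj_size = 0;	/* next_group_obj_off: likewise 0 here */
		else if (i < si_dev)	/* device before the cut */
			obj_size = si_obj_offset + stripe_unit - si_unit_off;
		else if (i == si_dev)	/* device holding the cut */
			obj_size = si_obj_offset;
		else			/* device after the cut */
			obj_size = si_obj_offset - si_unit_off;

		printf("dev %u -> new object size 0x%llx\n",
		       i, (unsigned long long)obj_size);
	}
	return 0;
}
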
diff --git a/fs/exofs/pnfs.h b/fs/exofs/pnfs.h
deleted file mode 100644
index c52e9888b8a..00000000000
--- a/fs/exofs/pnfs.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <bharrosh@panasas.com>
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify it under the
- * terms of the GNU General Public License version 2 as published by the Free
- * Software Foundation.
- *
- */
-
-/* FIXME: Remove this file once pnfs hits mainline */
-
-#ifndef __EXOFS_PNFS_H__
-#define __EXOFS_PNFS_H__
-
-#if ! defined(__PNFS_OSD_XDR_H__)
-
-enum pnfs_iomode {
- IOMODE_READ = 1,
- IOMODE_RW = 2,
- IOMODE_ANY = 3,
-};
-
-/* Layout Structure */
-enum pnfs_osd_raid_algorithm4 {
- PNFS_OSD_RAID_0 = 1,
- PNFS_OSD_RAID_4 = 2,
- PNFS_OSD_RAID_5 = 3,
- PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */
-};
-
-struct pnfs_osd_data_map {
- u32 odm_num_comps;
- u64 odm_stripe_unit;
- u32 odm_group_width;
- u32 odm_group_depth;
- u32 odm_mirror_cnt;
- u32 odm_raid_algorithm;
-};
-
-#endif /* ! defined(__PNFS_OSD_XDR_H__) */
-
-#endif /* __EXOFS_PNFS_H__ */
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index c57beddcc21..274894053b0 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -40,6 +40,8 @@
#include "exofs.h"
+#define EXOFS_DBGMSG2(M...) do {} while (0)
+
/******************************************************************************
* MOUNT OPTIONS
*****************************************************************************/
@@ -208,10 +210,48 @@ static void destroy_inodecache(void)
}
/******************************************************************************
- * SUPERBLOCK FUNCTIONS
+ * Some osd helpers
*****************************************************************************/
-static const struct super_operations exofs_sops;
-static const struct export_operations exofs_export_ops;
+void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
+{
+ osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
+}
+
+static int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
+ u64 offset, void *p, unsigned length)
+{
+ struct osd_request *or = osd_start_request(od, GFP_KERNEL);
+/* struct osd_sense_info osi = {.key = 0};*/
+ int ret;
+
+ if (unlikely(!or)) {
+ EXOFS_DBGMSG("%s: osd_start_request failed.\n", __func__);
+ return -ENOMEM;
+ }
+ ret = osd_req_read_kern(or, obj, offset, p, length);
+ if (unlikely(ret)) {
+ EXOFS_DBGMSG("%s: osd_req_read_kern failed.\n", __func__);
+ goto out;
+ }
+
+ ret = osd_finalize_request(or, 0, cred, NULL);
+ if (unlikely(ret)) {
+ EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
+ goto out;
+ }
+
+ ret = osd_execute_request(or);
+ if (unlikely(ret))
+ EXOFS_DBGMSG("osd_execute_request() => %d\n", ret);
+ /* osd_req_decode_sense(or, ret); */
+
+out:
+ osd_end_request(or);
+ EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%p ret=>%d\n",
+ _LLU(obj->id), _LLU(offset), _LLU(length), od, ret);
+ return ret;
+}
static const struct osd_attr g_attr_sb_stats = ATTR_DEF(
EXOFS_APAGE_SB_DATA,
@@ -223,21 +263,19 @@ static int __sbi_read_stats(struct exofs_sb_info *sbi)
struct osd_attr attrs[] = {
[0] = g_attr_sb_stats,
};
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
int ret;
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &sbi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
return ret;
}
- ios->cred = sbi->s_cred;
-
ios->in_attr = attrs;
ios->in_attr_len = ARRAY_SIZE(attrs);
- ret = exofs_sbi_read(ios);
+ ret = ore_read(ios);
if (unlikely(ret)) {
EXOFS_ERR("Error reading super_block stats => %d\n", ret);
goto out;
@@ -264,13 +302,13 @@ static int __sbi_read_stats(struct exofs_sb_info *sbi)
}
out:
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
return ret;
}
-static void stats_done(struct exofs_io_state *ios, void *p)
+static void stats_done(struct ore_io_state *ios, void *p)
{
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
/* Good thanks nothing to do anymore */
}
@@ -280,12 +318,12 @@ int exofs_sbi_write_stats(struct exofs_sb_info *sbi)
struct osd_attr attrs[] = {
[0] = g_attr_sb_stats,
};
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
int ret;
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &sbi->comps, &ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
+ EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
return ret;
}
@@ -293,21 +331,27 @@ int exofs_sbi_write_stats(struct exofs_sb_info *sbi)
sbi->s_ess.s_numfiles = cpu_to_le64(sbi->s_numfiles);
attrs[0].val_ptr = &sbi->s_ess;
- ios->cred = sbi->s_cred;
+
ios->done = stats_done;
ios->private = sbi;
ios->out_attr = attrs;
ios->out_attr_len = ARRAY_SIZE(attrs);
- ret = exofs_sbi_write(ios);
+ ret = ore_write(ios);
if (unlikely(ret)) {
- EXOFS_ERR("%s: exofs_sbi_write failed.\n", __func__);
- exofs_put_io_state(ios);
+ EXOFS_ERR("%s: ore_write failed.\n", __func__);
+ ore_put_io_state(ios);
}
return ret;
}
+/******************************************************************************
+ * SUPERBLOCK FUNCTIONS
+ *****************************************************************************/
+static const struct super_operations exofs_sops;
+static const struct export_operations exofs_export_ops;
+
/*
* Write the superblock to the OSD
*/
@@ -315,7 +359,9 @@ int exofs_sync_fs(struct super_block *sb, int wait)
{
struct exofs_sb_info *sbi;
struct exofs_fscb *fscb;
- struct exofs_io_state *ios;
+ struct ore_comp one_comp;
+ struct ore_components comps;
+ struct ore_io_state *ios;
int ret = -ENOMEM;
fscb = kmalloc(sizeof(*fscb), GFP_KERNEL);
@@ -331,7 +377,10 @@ int exofs_sync_fs(struct super_block *sb, int wait)
* version). Otherwise the exofs_fscb is read-only from mkfs time. All
* the writeable info is set in exofs_sbi_write_stats() above.
*/
- ret = exofs_get_io_state(&sbi->layout, &ios);
+
+ exofs_init_comps(&comps, &one_comp, sbi, EXOFS_SUPER_ID);
+
+ ret = ore_get_io_state(&sbi->layout, &comps, &ios);
if (unlikely(ret))
goto out;
@@ -345,14 +394,12 @@ int exofs_sync_fs(struct super_block *sb, int wait)
fscb->s_newfs = 0;
fscb->s_version = EXOFS_FSCB_VER;
- ios->obj.id = EXOFS_SUPER_ID;
ios->offset = 0;
ios->kern_buff = fscb;
- ios->cred = sbi->s_cred;
- ret = exofs_sbi_write(ios);
+ ret = ore_write(ios);
if (unlikely(ret))
- EXOFS_ERR("%s: exofs_sbi_write failed.\n", __func__);
+ EXOFS_ERR("%s: ore_write failed.\n", __func__);
else
sb->s_dirt = 0;
@@ -360,7 +407,7 @@ int exofs_sync_fs(struct super_block *sb, int wait)
unlock_super(sb);
out:
EXOFS_DBGMSG("s_nextid=0x%llx ret=%d\n", _LLU(sbi->s_nextid), ret);
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
kfree(fscb);
return ret;
}
@@ -384,15 +431,17 @@ static void _exofs_print_device(const char *msg, const char *dev_path,
void exofs_free_sbi(struct exofs_sb_info *sbi)
{
- while (sbi->layout.s_numdevs) {
- int i = --sbi->layout.s_numdevs;
- struct osd_dev *od = sbi->layout.s_ods[i];
+ while (sbi->comps.numdevs) {
+ int i = --sbi->comps.numdevs;
+ struct osd_dev *od = sbi->comps.ods[i];
if (od) {
- sbi->layout.s_ods[i] = NULL;
+ sbi->comps.ods[i] = NULL;
osduld_put_device(od);
}
}
+ if (sbi->comps.ods != sbi->_min_one_dev)
+ kfree(sbi->comps.ods);
kfree(sbi);
}
@@ -419,8 +468,8 @@ static void exofs_put_super(struct super_block *sb)
msecs_to_jiffies(100));
}
- _exofs_print_device("Unmounting", NULL, sbi->layout.s_ods[0],
- sbi->layout.s_pid);
+ _exofs_print_device("Unmounting", NULL, sbi->comps.ods[0],
+ sbi->one_comp.obj.partition);
bdi_destroy(&sbi->bdi);
exofs_free_sbi(sbi);
@@ -501,10 +550,19 @@ static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
return -EINVAL;
}
+ EXOFS_DBGMSG("exofs: layout: "
+ "num_comps=%u stripe_unit=0x%x group_width=%u "
+ "group_depth=0x%llx mirrors_p1=%u raid_algorithm=%u\n",
+ numdevs,
+ sbi->layout.stripe_unit,
+ sbi->layout.group_width,
+ _LLU(sbi->layout.group_depth),
+ sbi->layout.mirrors_p1,
+ sbi->data_map.odm_raid_algorithm);
return 0;
}
-static unsigned __ra_pages(struct exofs_layout *layout)
+static unsigned __ra_pages(struct ore_layout *layout)
{
const unsigned _MIN_RA = 32; /* min 128K read-ahead */
unsigned ra_pages = layout->group_width * layout->stripe_unit /
@@ -547,13 +605,11 @@ static int exofs_devs_2_odi(struct exofs_dt_device_info *dt_dev,
return !(odi->systemid_len || odi->osdname_len);
}
-static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
+static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
+ struct osd_dev *fscb_od,
unsigned table_count)
{
- struct exofs_sb_info *sbi = *psbi;
- struct osd_dev *fscb_od;
- struct osd_obj_id obj = {.partition = sbi->layout.s_pid,
- .id = EXOFS_DEVTABLE_ID};
+ struct ore_comp comp;
struct exofs_device_table *dt;
unsigned table_bytes = table_count * sizeof(dt->dt_dev_table[0]) +
sizeof(*dt);
@@ -567,10 +623,14 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
return -ENOMEM;
}
- fscb_od = sbi->layout.s_ods[0];
- sbi->layout.s_ods[0] = NULL;
- sbi->layout.s_numdevs = 0;
- ret = exofs_read_kern(fscb_od, sbi->s_cred, &obj, 0, dt, table_bytes);
+ sbi->comps.numdevs = 0;
+
+ comp.obj.partition = sbi->one_comp.obj.partition;
+ comp.obj.id = EXOFS_DEVTABLE_ID;
+ exofs_make_credential(comp.cred, &comp.obj);
+
+ ret = exofs_read_kern(fscb_od, comp.cred, &comp.obj, 0, dt,
+ table_bytes);
if (unlikely(ret)) {
EXOFS_ERR("ERROR: reading device table\n");
goto out;
@@ -588,16 +648,18 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
goto out;
if (likely(numdevs > 1)) {
- unsigned size = numdevs * sizeof(sbi->layout.s_ods[0]);
+ unsigned size = numdevs * sizeof(sbi->comps.ods[0]);
- sbi = krealloc(sbi, sizeof(*sbi) + size, GFP_KERNEL);
- if (unlikely(!sbi)) {
+ /* Twice bigger table: See exofs_init_comps() and below
+ * comment
+ */
+ sbi->comps.ods = kzalloc(size + size - 1, GFP_KERNEL);
+ if (unlikely(!sbi->comps.ods)) {
+ EXOFS_ERR("ERROR: faild allocating Device array[%d]\n",
+ numdevs);
ret = -ENOMEM;
goto out;
}
- memset(&sbi->layout.s_ods[1], 0,
- size - sizeof(sbi->layout.s_ods[0]));
- *psbi = sbi;
}
for (i = 0; i < numdevs; i++) {
@@ -619,8 +681,8 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
* line. We always keep them in device-table order.
*/
if (fscb_od && osduld_device_same(fscb_od, &odi)) {
- sbi->layout.s_ods[i] = fscb_od;
- ++sbi->layout.s_numdevs;
+ sbi->comps.ods[i] = fscb_od;
+ ++sbi->comps.numdevs;
fscb_od = NULL;
continue;
}
@@ -633,13 +695,13 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
goto out;
}
- sbi->layout.s_ods[i] = od;
- ++sbi->layout.s_numdevs;
+ sbi->comps.ods[i] = od;
+ ++sbi->comps.numdevs;
/* Read the fscb of the other devices to make sure the FS
* partition is there.
*/
- ret = exofs_read_kern(od, sbi->s_cred, &obj, 0, &fscb,
+ ret = exofs_read_kern(od, comp.cred, &comp.obj, 0, &fscb,
sizeof(fscb));
if (unlikely(ret)) {
EXOFS_ERR("ERROR: Malformed participating device "
@@ -656,13 +718,22 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi,
out:
kfree(dt);
- if (unlikely(!ret && fscb_od)) {
- EXOFS_ERR(
- "ERROR: Bad device-table container device not present\n");
- osduld_put_device(fscb_od);
- ret = -EINVAL;
- }
+ if (likely(!ret)) {
+ unsigned numdevs = sbi->comps.numdevs;
+ if (unlikely(fscb_od)) {
+ EXOFS_ERR("ERROR: Bad device-table container device not present\n");
+ osduld_put_device(fscb_od);
+ return -EINVAL;
+ }
+ /* exofs round-robins the device table view according to inode
+ * number. We hold a twice-bigger table, so inodes can point
+ * to any device and still get a sequential view of the table
+ * starting at that device. See exofs_init_comps()
+ */
+ for (i = 0; i < numdevs - 1; ++i)
+ sbi->comps.ods[i + numdevs] = sbi->comps.ods[i];
+ }
return ret;
}
@@ -676,7 +747,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
struct exofs_sb_info *sbi; /*extended info */
struct osd_dev *od; /* Master device */
struct exofs_fscb fscb; /*on-disk superblock info */
- struct osd_obj_id obj;
+ struct ore_comp comp;
unsigned table_count;
int ret;
@@ -684,10 +755,6 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi)
return -ENOMEM;
- ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
- if (ret)
- goto free_bdi;
-
/* use mount options to fill superblock */
if (opts->is_osdname) {
struct osd_dev_info odi = {.systemid_len = 0};
@@ -695,6 +762,8 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
odi.osdname_len = strlen(opts->dev_name);
odi.osdname = (u8 *)opts->dev_name;
od = osduld_info_lookup(&odi);
+ kfree(opts->dev_name);
+ opts->dev_name = NULL;
} else {
od = osduld_path_lookup(opts->dev_name);
}
@@ -709,11 +778,16 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
sbi->layout.group_width = 1;
sbi->layout.group_depth = -1;
sbi->layout.group_count = 1;
- sbi->layout.s_ods[0] = od;
- sbi->layout.s_numdevs = 1;
- sbi->layout.s_pid = opts->pid;
sbi->s_timeout = opts->timeout;
+ sbi->one_comp.obj.partition = opts->pid;
+ sbi->one_comp.obj.id = 0;
+ exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
+ sbi->comps.numdevs = 1;
+ sbi->comps.single_comp = EC_SINGLE_COMP;
+ sbi->comps.comps = &sbi->one_comp;
+ sbi->comps.ods = sbi->_min_one_dev;
+
/* fill in some other data by hand */
memset(sb->s_id, 0, sizeof(sb->s_id));
strcpy(sb->s_id, "exofs");
@@ -724,11 +798,11 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_bdev = NULL;
sb->s_dev = 0;
- obj.partition = sbi->layout.s_pid;
- obj.id = EXOFS_SUPER_ID;
- exofs_make_credential(sbi->s_cred, &obj);
+ comp.obj.partition = sbi->one_comp.obj.partition;
+ comp.obj.id = EXOFS_SUPER_ID;
+ exofs_make_credential(comp.cred, &comp.obj);
- ret = exofs_read_kern(od, sbi->s_cred, &obj, 0, &fscb, sizeof(fscb));
+ ret = exofs_read_kern(od, comp.cred, &comp.obj, 0, &fscb, sizeof(fscb));
if (unlikely(ret))
goto free_sbi;
@@ -757,9 +831,11 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
table_count = le64_to_cpu(fscb.s_dev_table_count);
if (table_count) {
- ret = exofs_read_lookup_dev_table(&sbi, table_count);
+ ret = exofs_read_lookup_dev_table(sbi, od, table_count);
if (unlikely(ret))
goto free_sbi;
+ } else {
+ sbi->comps.ods[0] = od;
}
__sbi_read_stats(sbi);
@@ -793,20 +869,20 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
- _exofs_print_device("Mounting", opts->dev_name, sbi->layout.s_ods[0],
- sbi->layout.s_pid);
- if (opts->is_osdname)
- kfree(opts->dev_name);
+ ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
+ if (ret) {
+ EXOFS_DBGMSG("Failed to bdi_setup_and_register\n");
+ goto free_sbi;
+ }
+
+ _exofs_print_device("Mounting", opts->dev_name, sbi->comps.ods[0],
+ sbi->one_comp.obj.partition);
return 0;
free_sbi:
- bdi_destroy(&sbi->bdi);
-free_bdi:
EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
- opts->dev_name, sbi->layout.s_pid, ret);
+ opts->dev_name, sbi->one_comp.obj.partition, ret);
exofs_free_sbi(sbi);
- if (opts->is_osdname)
- kfree(opts->dev_name);
return ret;
}
@@ -837,7 +913,7 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct exofs_sb_info *sbi = sb->s_fs_info;
- struct exofs_io_state *ios;
+ struct ore_io_state *ios;
struct osd_attr attrs[] = {
ATTR_DEF(OSD_APAGE_PARTITION_QUOTAS,
OSD_ATTR_PQ_CAPACITY_QUOTA, sizeof(__be64)),
@@ -846,21 +922,18 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
};
uint64_t capacity = ULLONG_MAX;
uint64_t used = ULLONG_MAX;
- uint8_t cred_a[OSD_CAP_LEN];
int ret;
- ret = exofs_get_io_state(&sbi->layout, &ios);
+ ret = ore_get_io_state(&sbi->layout, &sbi->comps, &ios);
if (ret) {
- EXOFS_DBGMSG("exofs_get_io_state failed.\n");
+ EXOFS_DBGMSG("ore_get_io_state failed.\n");
return ret;
}
- exofs_make_credential(cred_a, &ios->obj);
- ios->cred = sbi->s_cred;
ios->in_attr = attrs;
ios->in_attr_len = ARRAY_SIZE(attrs);
- ret = exofs_sbi_read(ios);
+ ret = ore_read(ios);
if (unlikely(ret))
goto out;
@@ -889,7 +962,7 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = EXOFS_NAME_LEN;
out:
- exofs_put_io_state(ios);
+ ore_put_io_state(ios);
return ret;
}
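
The duplication loop at the end of exofs_read_lookup_dev_table() is easier to see in isolation. Below is a minimal userspace sketch with plain ints standing in for struct osd_dev pointers: the table is sized 2*numdevs-1, the first numdevs-1 entries are mirrored after the real ones, and a component view starting at any device index can then read numdevs consecutive slots without wrapping.

/* Minimal model of the doubled exofs device table. */
#include <stdio.h>

#define NUMDEVS 4

int main(void)
{
	int ods[2 * NUMDEVS - 1];
	int i, start;

	for (i = 0; i < NUMDEVS; i++)
		ods[i] = 100 + i;		/* pretend device ids */

	/* same duplication as in exofs_read_lookup_dev_table() */
	for (i = 0; i < NUMDEVS - 1; i++)
		ods[i + NUMDEVS] = ods[i];

	/* a per-inode view starting at device 'start' is just &ods[start] */
	for (start = 0; start < NUMDEVS; start++) {
		printf("view@%d:", start);
		for (i = 0; i < NUMDEVS; i++)
			printf(" %d", ods[start + i]);
		printf("\n");
	}
	return 0;
}
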
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 52c05376394..35d6a3cfd9f 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -194,12 +194,10 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
case ACL_TYPE_ACCESS:
name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return error;
else {
- inode->i_mode = mode;
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
if (error == 0)
@@ -253,16 +251,14 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
inode->i_mode &= ~current_umask();
}
if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
- mode_t mode = inode->i_mode;
if (S_ISDIR(inode->i_mode)) {
error = ext2_set_acl(inode, ACL_TYPE_DEFAULT, acl);
if (error)
goto cleanup;
}
- error = posix_acl_create(&acl, GFP_KERNEL, &mode);
+ error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
if (error < 0)
return error;
- inode->i_mode = mode;
if (error > 0) {
/* This is an extended ACL */
error = ext2_set_acl(inode, ACL_TYPE_ACCESS, acl);
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 6c29bf0df04..3091f62e55b 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -199,12 +199,10 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
case ACL_TYPE_ACCESS:
name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return error;
else {
- inode->i_mode = mode;
inode->i_ctime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, inode);
if (error == 0)
@@ -261,19 +259,16 @@ ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
inode->i_mode &= ~current_umask();
}
if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
- mode_t mode = inode->i_mode;
-
if (S_ISDIR(inode->i_mode)) {
error = ext3_set_acl(handle, inode,
ACL_TYPE_DEFAULT, acl);
if (error)
goto cleanup;
}
- error = posix_acl_create(&acl, GFP_NOFS, &mode);
+ error = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
if (error < 0)
return error;
- inode->i_mode = mode;
if (error > 0) {
/* This is an extended ACL */
error = ext3_set_acl(handle, inode, ACL_TYPE_ACCESS, acl);
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 04109460ba9..56fd8f86593 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
- mmp.o
+ mmp.o indirect.o
ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index dca2d1ded93..a5c29bb3b83 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -198,12 +198,10 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,
case ACL_TYPE_ACCESS:
name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return error;
else {
- inode->i_mode = mode;
inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
if (error == 0)
@@ -259,19 +257,16 @@ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
inode->i_mode &= ~current_umask();
}
if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
- mode_t mode = inode->i_mode;
-
if (S_ISDIR(inode->i_mode)) {
error = ext4_set_acl(handle, inode,
ACL_TYPE_DEFAULT, acl);
if (error)
goto cleanup;
}
- error = posix_acl_create(&acl, GFP_NOFS, &mode);
+ error = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
if (error < 0)
return error;
- inode->i_mode = mode;
if (error > 0) {
/* This is an extended ACL */
error = ext4_set_acl(handle, inode, ACL_TYPE_ACCESS, acl);
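
The ext2/ext3/ext4 hunks above now hand &inode->i_mode straight to posix_acl_equiv_mode() and posix_acl_create() instead of round-tripping through a local mode_t. As a rough picture of the mode folding involved, here is a deliberately simplified userspace sketch; the toy_* names are invented, only the three basic tags are modelled, and ACL_USER/ACL_GROUP entries, masks and error returns are ignored.

/* Simplified sketch of folding a minimal ACL into the mode bits in place. */
#include <stdio.h>
#include <sys/stat.h>

struct toy_acl_entry {
	enum { TOY_USER_OBJ, TOY_GROUP_OBJ, TOY_OTHER } tag;
	unsigned perm;			/* rwx bits, 0..7 */
};

static void toy_equiv_mode(const struct toy_acl_entry *e, int n, mode_t *mode)
{
	mode_t m = *mode & ~0777;	/* keep file type, suid/sgid/sticky */

	for (int i = 0; i < n; i++) {
		switch (e[i].tag) {
		case TOY_USER_OBJ:  m |= (e[i].perm & 7) << 6; break;
		case TOY_GROUP_OBJ: m |= (e[i].perm & 7) << 3; break;
		case TOY_OTHER:     m |= (e[i].perm & 7);      break;
		}
	}
	*mode = m;			/* written in place, as in the patch */
}

int main(void)
{
	struct toy_acl_entry acl[] = {
		{ TOY_USER_OBJ, 7 }, { TOY_GROUP_OBJ, 5 }, { TOY_OTHER, 0 },
	};
	mode_t mode = S_IFREG | 0666;

	toy_equiv_mode(acl, 3, &mode);
	printf("resulting mode: %04o\n", (unsigned)(mode & 07777)); /* 0750 */
	return 0;
}
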
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 264f6949511..f8224adf496 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -620,3 +620,51 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
}
+/**
+ * ext4_inode_to_goal_block - return a hint for block allocation
+ * @inode: inode for block allocation
+ *
+ * Return the ideal location to start allocating blocks for a
+ * newly created inode.
+ */
+ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ ext4_group_t block_group;
+ ext4_grpblk_t colour;
+ int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
+ ext4_fsblk_t bg_start;
+ ext4_fsblk_t last_block;
+
+ block_group = ei->i_block_group;
+ if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
+ /*
+ * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
+ * block groups per flexgroup, reserve the first block
+ * group for directories and special files. Regular
+ * files will start at the second block group. This
+ * tends to speed up directory access and improves
+ * fsck times.
+ */
+ block_group &= ~(flex_size-1);
+ if (S_ISREG(inode->i_mode))
+ block_group++;
+ }
+ bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
+ last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
+
+ /*
+ * If we are doing delayed allocation, we don't need to take
+ * colour into account.
+ */
+ if (test_opt(inode->i_sb, DELALLOC))
+ return bg_start;
+
+ if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
+ colour = (current->pid % 16) *
+ (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+ else
+ colour = (current->pid % 16) * ((last_block - bg_start) / 16);
+ return bg_start + colour;
+}
+
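
The goal-block heuristic moved into ext4_inode_to_goal_block() is pure arithmetic and can be rerun outside the kernel. The sketch below uses made-up geometry, assumes EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME is 4, substitutes getpid() for current->pid, and ignores s_first_data_block and the DELALLOC shortcut.

/* Rerun of the flex_bg + PID-colour goal-block calculation. */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint64_t blocks_per_group = 32768;	/* typical for 4K blocks */
	uint64_t total_blocks = 1 << 22;	/* hypothetical fs size */
	unsigned flex_size = 16;		/* groups per flexgroup */
	unsigned block_group = 37;		/* inode's block group */
	int is_reg = 1;				/* S_ISREG(inode->i_mode) */

	if (flex_size >= 4 /* assumed EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME */) {
		block_group &= ~(flex_size - 1); /* first group of flexgroup */
		if (is_reg)
			block_group++;		 /* regular files skip it */
	}

	uint64_t bg_start = (uint64_t)block_group * blocks_per_group;
	uint64_t last_block = total_blocks - 1;
	uint64_t colour;

	if (bg_start + blocks_per_group <= last_block)
		colour = (getpid() % 16) * (blocks_per_group / 16);
	else
		colour = (getpid() % 16) * ((last_block - bg_start) / 16);

	printf("goal block = %llu (bg_start %llu + colour %llu)\n",
	       (unsigned long long)(bg_start + colour),
	       (unsigned long long)bg_start, (unsigned long long)colour);
	return 0;
}
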
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index fac90f3fba8..8efb2f0a344 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -246,3 +246,24 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
return 1;
}
+int ext4_check_blockref(const char *function, unsigned int line,
+ struct inode *inode, __le32 *p, unsigned int max)
+{
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ __le32 *bref = p;
+ unsigned int blk;
+
+ while (bref < p+max) {
+ blk = le32_to_cpu(*bref++);
+ if (blk &&
+ unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
+ blk, 1))) {
+ es->s_last_error_block = cpu_to_le64(blk);
+ ext4_error_inode(inode, function, line, blk,
+ "invalid block");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
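
ext4_check_blockref() above is a straight walk over little-endian block references. A self-contained version of the same loop, with the validity test reduced to a plain range check (the real ext4_data_block_valid() additionally rejects blocks that overlap filesystem metadata), could look like this; le32toh()/htole32() come from the Linux <endian.h>.

/* Userspace sketch of the indirect-block reference check. */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

static int toy_block_valid(uint32_t blk, uint32_t first_data, uint32_t nblocks)
{
	return blk >= first_data && blk < nblocks;
}

static int toy_check_blockref(const uint32_t *p, unsigned max,
			      uint32_t first_data, uint32_t nblocks)
{
	for (const uint32_t *bref = p; bref < p + max; bref++) {
		uint32_t blk = le32toh(*bref);

		if (blk && !toy_block_valid(blk, first_data, nblocks)) {
			fprintf(stderr, "invalid block %u\n", blk);
			return -5;	/* -EIO */
		}
	}
	return 0;
}

int main(void)
{
	/* on-disk refs: 0 means "hole", 9999999 is out of range */
	uint32_t refs[4] = { htole32(1234), 0, htole32(5678), htole32(9999999) };

	printf("check => %d\n", toy_check_blockref(refs, 4, 1, 100000));
	return 0;
}
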
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index fa44df87971..e717dfd2f2b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -526,6 +526,7 @@ struct ext4_new_group_data {
#define EXT4_FREE_BLOCKS_METADATA 0x0001
#define EXT4_FREE_BLOCKS_FORGET 0x0002
#define EXT4_FREE_BLOCKS_VALIDATED 0x0004
+#define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008
/*
* ioctl commands
@@ -939,6 +940,8 @@ struct ext4_inode_info {
#define ext4_find_next_zero_bit find_next_zero_bit_le
#define ext4_find_next_bit find_next_bit_le
+extern void ext4_set_bits(void *bm, int cur, int len);
+
/*
* Maximal mount counts between two filesystem checks
*/
@@ -1126,7 +1129,8 @@ struct ext4_sb_info {
struct journal_s *s_journal;
struct list_head s_orphan;
struct mutex s_orphan_lock;
- struct mutex s_resize_lock;
+ unsigned long s_resize_flags; /* Flags indicating if there
+ is a resizer */
unsigned long s_commit_interval;
u32 s_max_batch_time;
u32 s_min_batch_time;
@@ -1214,6 +1218,9 @@ struct ext4_sb_info {
/* Kernel thread for multiple mount protection */
struct task_struct *s_mmp_tsk;
+
+ /* record the last minlen when FITRIM is called. */
+ atomic_t s_last_trim_minblks;
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1743,6 +1750,7 @@ extern unsigned ext4_init_block_bitmap(struct super_block *sb,
struct ext4_group_desc *desc);
#define ext4_free_blocks_after_init(sb, group, desc) \
ext4_init_block_bitmap(sb, NULL, group, desc)
+ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
/* dir.c */
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
@@ -1793,7 +1801,7 @@ extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
unsigned long count, int flags);
extern int ext4_mb_add_groupinfo(struct super_block *sb,
ext4_group_t i, struct ext4_group_desc *desc);
-extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_fsblk_t block, unsigned long count);
extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
@@ -1834,6 +1842,17 @@ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
+
+/* indirect.c */
+extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map, int flags);
+extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
+ const struct iovec *iov, loff_t offset,
+ unsigned long nr_segs);
+extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
+extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk);
+extern void ext4_ind_truncate(struct inode *inode);
+
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
@@ -1855,6 +1874,9 @@ extern int ext4_group_extend(struct super_block *sb,
ext4_fsblk_t n_blocks_count);
/* super.c */
+extern void *ext4_kvmalloc(size_t size, gfp_t flags);
+extern void *ext4_kvzalloc(size_t size, gfp_t flags);
+extern void ext4_kvfree(void *ptr);
extern void __ext4_error(struct super_block *, const char *, unsigned int,
const char *, ...)
__attribute__ ((format (printf, 4, 5)));
@@ -2067,11 +2089,19 @@ struct ext4_group_info {
* 5 free 8-block regions. */
};
-#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
+#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
+#define EXT4_GROUP_INFO_WAS_TRIMMED_BIT 1
#define EXT4_MB_GRP_NEED_INIT(grp) \
(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_WAS_TRIMMED(grp) \
+ (test_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_SET_TRIMMED(grp) \
+ (set_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_CLEAR_TRIMMED(grp) \
+ (clear_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
+
#define EXT4_MAX_CONTENTION 8
#define EXT4_CONTENTION_THRESHOLD 2
@@ -2123,6 +2153,19 @@ static inline void ext4_mark_super_dirty(struct super_block *sb)
}
/*
+ * Block validity checking
+ */
+#define ext4_check_indirect_blockref(inode, bh) \
+ ext4_check_blockref(__func__, __LINE__, inode, \
+ (__le32 *)(bh)->b_data, \
+ EXT4_ADDR_PER_BLOCK((inode)->i_sb))
+
+#define ext4_ind_check_inode(inode) \
+ ext4_check_blockref(__func__, __LINE__, inode, \
+ EXT4_I(inode)->i_data, \
+ EXT4_NDIR_BLOCKS)
+
+/*
* Inodes and files operations
*/
@@ -2151,6 +2194,8 @@ extern void ext4_exit_system_zone(void);
extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
ext4_fsblk_t start_blk,
unsigned int count);
+extern int ext4_check_blockref(const char *, unsigned int,
+ struct inode *, __le32 *, unsigned int);
/* extents.c */
extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
@@ -2230,6 +2275,10 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
+#define EXT4_RESIZING 0
+extern int ext4_resize_begin(struct super_block *sb);
+extern void ext4_resize_end(struct super_block *sb);
+
#endif /* __KERNEL__ */
#endif /* _EXT4_H */
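
The new WAS_TRIMMED bit in ext4_group_info encodes a small life cycle: set once a group has been trimmed, tested so a later FITRIM pass can skip it (together with s_last_trim_minblks), and cleared again when blocks are freed in that group. A toy, non-atomic rendition of those macros is shown below; the kernel versions use the atomic set_bit()/test_bit()/clear_bit() helpers on grp->bb_state.

/* Non-atomic model of the EXT4_MB_GRP_*_TRIMMED macros. */
#include <stdio.h>

#define WAS_TRIMMED_BIT 1

static unsigned long bb_state;

static int  was_trimmed(void)   { return !!(bb_state & (1UL << WAS_TRIMMED_BIT)); }
static void set_trimmed(void)   { bb_state |=  (1UL << WAS_TRIMMED_BIT); }
static void clear_trimmed(void) { bb_state &= ~(1UL << WAS_TRIMMED_BIT); }

int main(void)
{
	printf("before trim: was_trimmed=%d\n", was_trimmed());
	set_trimmed();			/* after a successful trim pass */
	printf("after trim:  was_trimmed=%d (next FITRIM may skip group)\n",
	       was_trimmed());
	clear_trimmed();		/* blocks freed in this group again */
	printf("after free:  was_trimmed=%d\n", was_trimmed());
	return 0;
}
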
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index f815cc81e7a..57cf568a98a 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -114,12 +114,6 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t block)
{
- struct ext4_inode_info *ei = EXT4_I(inode);
- ext4_fsblk_t bg_start;
- ext4_fsblk_t last_block;
- ext4_grpblk_t colour;
- ext4_group_t block_group;
- int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
int depth;
if (path) {
@@ -161,36 +155,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
}
/* OK. use inode's group */
- block_group = ei->i_block_group;
- if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
- /*
- * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
- * block groups per flexgroup, reserve the first block
- * group for directories and special files. Regular
- * files will start at the second block group. This
- * tends to speed up directory access and improves
- * fsck times.
- */
- block_group &= ~(flex_size-1);
- if (S_ISREG(inode->i_mode))
- block_group++;
- }
- bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
- last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
-
- /*
- * If we are doing delayed allocation, we don't need take
- * colour into account.
- */
- if (test_opt(inode->i_sb, DELALLOC))
- return bg_start;
-
- if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
- colour = (current->pid % 16) *
- (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
- else
- colour = (current->pid % 16) * ((last_block - bg_start) / 16);
- return bg_start + colour + block;
+ return ext4_inode_to_goal_block(inode);
}
/*
@@ -776,6 +741,16 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
logical, le32_to_cpu(curp->p_idx->ei_block));
return -EIO;
}
+
+ if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
+ >= le16_to_cpu(curp->p_hdr->eh_max))) {
+ EXT4_ERROR_INODE(inode,
+ "eh_entries %d >= eh_max %d!",
+ le16_to_cpu(curp->p_hdr->eh_entries),
+ le16_to_cpu(curp->p_hdr->eh_max));
+ return -EIO;
+ }
+
len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
/* insert after */
@@ -805,13 +780,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
ext4_idx_store_pblock(ix, ptr);
le16_add_cpu(&curp->p_hdr->eh_entries, 1);
- if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
- > le16_to_cpu(curp->p_hdr->eh_max))) {
- EXT4_ERROR_INODE(inode,
- "logical %d == ei_block %d!",
- logical, le32_to_cpu(curp->p_idx->ei_block));
- return -EIO;
- }
if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
return -EIO;
@@ -1446,8 +1414,7 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
* ext4_ext_next_leaf_block:
* returns first allocated block from next leaf or EXT_MAX_BLOCKS
*/
-static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
- struct ext4_ext_path *path)
+static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
int depth;
@@ -1757,7 +1724,6 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
goto merge;
}
-repeat:
depth = ext_depth(inode);
eh = path[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
@@ -1765,9 +1731,10 @@ repeat:
/* probably next leaf has space for us? */
fex = EXT_LAST_EXTENT(eh);
- next = ext4_ext_next_leaf_block(inode, path);
- if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
- && next != EXT_MAX_BLOCKS) {
+ next = EXT_MAX_BLOCKS;
+ if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
+ next = ext4_ext_next_leaf_block(path);
+ if (next != EXT_MAX_BLOCKS) {
ext_debug("next leaf block - %d\n", next);
BUG_ON(npath != NULL);
npath = ext4_ext_find_extent(inode, next, NULL);
@@ -1779,7 +1746,7 @@ repeat:
ext_debug("next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
path = npath;
- goto repeat;
+ goto has_space;
}
ext_debug("next leaf has no free space(%d,%d)\n",
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
@@ -1839,7 +1806,7 @@ has_space:
ext4_ext_pblock(newext),
ext4_ext_is_uninitialized(newext),
ext4_ext_get_actual_len(newext),
- nearex, len, nearex + 1, nearex + 2);
+ nearex, len, nearex, nearex + 1);
memmove(nearex + 1, nearex, len);
path[depth].p_ext = nearex;
}
@@ -2052,7 +2019,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
}
/*
- * ext4_ext_in_cache()
+ * ext4_ext_check_cache()
* Checks to see if the given block is in the cache.
* If it is, the cached extent is stored in the given
* cache extent pointer. If the cached extent is a hole,
@@ -2134,8 +2101,6 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
/*
* ext4_ext_rm_idx:
* removes index from the index block.
- * It's used in truncate case only, thus all requests are for
- * last index in the block only.
*/
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path)
@@ -2153,6 +2118,13 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
err = ext4_ext_get_access(handle, inode, path);
if (err)
return err;
+
+ if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
+ int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
+ len *= sizeof(struct ext4_extent_idx);
+ memmove(path->p_idx, path->p_idx + 1, len);
+ }
+
le16_add_cpu(&path->p_hdr->eh_entries, -1);
err = ext4_ext_dirty(handle, inode, path);
if (err)
@@ -2534,8 +2506,7 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
return 1;
}
-static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
- ext4_lblk_t end)
+static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
{
struct super_block *sb = inode->i_sb;
int depth = ext_depth(inode);
@@ -2575,7 +2546,7 @@ again:
if (i == depth) {
/* this is leaf block */
err = ext4_ext_rm_leaf(handle, inode, path,
- start, end);
+ start, EXT_MAX_BLOCKS - 1);
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
@@ -3107,12 +3078,10 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
struct ext4_ext_path *path)
{
struct ext4_extent *ex;
- struct ext4_extent_header *eh;
int depth;
int err = 0;
depth = ext_depth(inode);
- eh = path[depth].p_hdr;
ex = path[depth].p_ext;
ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
@@ -3357,8 +3326,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* check in cache */
- if (ext4_ext_in_cache(inode, map->m_lblk, &newex) &&
- ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0)) {
+ if (!(flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) &&
+ ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
if (!newex.ee_start_lo && !newex.ee_start_hi) {
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
@@ -3497,8 +3466,27 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ext4_ext_mark_uninitialized(ex);
- err = ext4_ext_remove_space(inode, map->m_lblk,
- map->m_lblk + punched_out);
+ ext4_ext_invalidate_cache(inode);
+
+ err = ext4_ext_rm_leaf(handle, inode, path,
+ map->m_lblk, map->m_lblk + punched_out);
+
+ if (!err && path->p_hdr->eh_entries == 0) {
+ /*
+ * Punch hole freed all of this sub tree,
+ * so we need to correct eh_depth
+ */
+ err = ext4_ext_get_access(handle, inode, path);
+ if (err == 0) {
+ ext_inode_hdr(inode)->eh_depth = 0;
+ ext_inode_hdr(inode)->eh_max =
+ cpu_to_le16(ext4_ext_space_root(
+ inode, 0));
+
+ err = ext4_ext_dirty(
+ handle, inode, path);
+ }
+ }
goto out2;
}
@@ -3596,17 +3584,18 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
}
err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
- if (err)
- goto out2;
-
- err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (!err)
+ err = ext4_ext_insert_extent(handle, inode, path,
+ &newex, flags);
if (err) {
+ int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
+ EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
/* free data blocks we just allocated */
/* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free() */
ext4_discard_preallocations(inode);
ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
- ext4_ext_get_actual_len(&newex), 0);
+ ext4_ext_get_actual_len(&newex), fb_flags);
goto out2;
}
@@ -3699,7 +3688,7 @@ void ext4_ext_truncate(struct inode *inode)
last_block = (inode->i_size + sb->s_blocksize - 1)
>> EXT4_BLOCK_SIZE_BITS(sb);
- err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+ err = ext4_ext_remove_space(inode, last_block);
/* In a multi-transaction truncate, we only make the final
* transaction synchronous.
@@ -3835,7 +3824,7 @@ retry:
blkbits) >> blkbits))
new_size = offset + len;
else
- new_size = (map.m_lblk + ret) << blkbits;
+ new_size = ((loff_t) map.m_lblk + ret) << blkbits;
ext4_falloc_update_inode(inode, mode, new_size,
(map.m_flags & EXT4_MAP_NEW));
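
The (loff_t) cast added in the fallocate path above matters because map.m_lblk is a 32-bit logical block number: without the cast the shift by blkbits happens in 32-bit arithmetic and the computed size wraps for offsets at or past 4 GiB with 4K blocks. A short demonstration with arbitrary sample values:

/* Shows why the shift must be widened to 64 bits before << blkbits. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t m_lblk = 0x140000;	/* logical block ~5 GiB into the file */
	int ret = 16;			/* blocks just allocated */
	unsigned blkbits = 12;		/* 4K blocks */

	long long wrong = (m_lblk + ret) << blkbits;		 /* 32-bit shift */
	long long right = ((long long)m_lblk + ret) << blkbits; /* 64-bit shift */

	printf("32-bit shift: 0x%llx\n", wrong);	/* truncated result */
	printf("64-bit shift: 0x%llx\n", right);	/* expected new_size */
	return 0;
}
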
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index da3bed3e0c2..036f78f7a1e 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -129,15 +129,30 @@ static int ext4_sync_parent(struct inode *inode)
{
struct writeback_control wbc;
struct dentry *dentry = NULL;
+ struct inode *next;
int ret = 0;
- while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
+ if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
+ return 0;
+ inode = igrab(inode);
+ while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
- dentry = list_entry(inode->i_dentry.next,
- struct dentry, d_alias);
- if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
+ dentry = NULL;
+ spin_lock(&inode->i_lock);
+ if (!list_empty(&inode->i_dentry)) {
+ dentry = list_first_entry(&inode->i_dentry,
+ struct dentry, d_alias);
+ dget(dentry);
+ }
+ spin_unlock(&inode->i_lock);
+ if (!dentry)
break;
- inode = dentry->d_parent->d_inode;
+ next = igrab(dentry->d_parent->d_inode);
+ dput(dentry);
+ if (!next)
+ break;
+ iput(inode);
+ inode = next;
ret = sync_mapping_buffers(inode->i_mapping);
if (ret)
break;
@@ -148,6 +163,7 @@ static int ext4_sync_parent(struct inode *inode)
if (ret)
break;
}
+ iput(inode);
return ret;
}
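
The reworked ext4_sync_parent() above walks up the directory chain hand over hand: it takes a reference on the parent (dget() plus igrab()) before dropping the reference on the current inode, so nothing is used after its count could reach zero. The toy model below, with invented toy_* structures standing in for inodes and dentries, shows only that reference pattern.

/* Hand-over-hand refcounted walk up a parent chain (toy structures). */
#include <stdio.h>

struct toy_inode {
	const char *name;
	struct toy_inode *parent;	/* NULL at the root */
	int refcount;
	int new_entry;			/* models EXT4_STATE_NEWENTRY */
};

static struct toy_inode *toy_igrab(struct toy_inode *i)
{
	if (i)
		i->refcount++;
	return i;
}

static void toy_iput(struct toy_inode *i)
{
	if (i && --i->refcount == 0)
		printf("  (%s released)\n", i->name);
}

static void toy_sync_parents(struct toy_inode *inode)
{
	struct toy_inode *next;

	inode = toy_igrab(inode);
	while (inode && inode->new_entry) {
		inode->new_entry = 0;
		printf("sync %s\n", inode->name);
		next = toy_igrab(inode->parent);
		toy_iput(inode);	/* safe: the parent is already held */
		inode = next;
	}
	toy_iput(inode);
}

int main(void)
{
	struct toy_inode root = { "/",    NULL,  1, 0 };
	struct toy_inode dir  = { "/a",   &root, 1, 1 };
	struct toy_inode sub  = { "/a/b", &dir,  1, 1 };

	toy_sync_parents(&sub);
	return 0;
}
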
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 21bb2f61e50..9c63f273b55 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1287,7 +1287,7 @@ extern int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
group, used_blks,
ext4_itable_unused_count(sb, gdp));
ret = 1;
- goto out;
+ goto err_out;
}
blk = ext4_inode_table(sb, gdp) + used_blks;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
new file mode 100644
index 00000000000..b8602cde5b5
--- /dev/null
+++ b/fs/ext4/indirect.c
@@ -0,0 +1,1482 @@
+/*
+ * linux/fs/ext4/indirect.c
+ *
+ * from
+ *
+ * linux/fs/ext4/inode.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * from
+ *
+ * linux/fs/minix/inode.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Goal-directed block allocation by Stephen Tweedie
+ * (sct@redhat.com), 1993, 1998
+ */
+
+#include <linux/module.h>
+#include "ext4_jbd2.h"
+#include "truncate.h"
+
+#include <trace/events/ext4.h>
+
+typedef struct {
+ __le32 *p;
+ __le32 key;
+ struct buffer_head *bh;
+} Indirect;
+
+static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
+{
+ p->key = *(p->p = v);
+ p->bh = bh;
+}
+
+/**
+ * ext4_block_to_path - parse the block number into array of offsets
+ * @inode: inode in question (we are only interested in its superblock)
+ * @i_block: block number to be parsed
+ * @offsets: array to store the offsets in
+ * @boundary: set this non-zero if the referred-to block is likely to be
+ * followed (on disk) by an indirect block.
+ *
+ * To store the locations of file's data ext4 uses a data structure common
+ * for UNIX filesystems - tree of pointers anchored in the inode, with
+ * data blocks at leaves and indirect blocks in intermediate nodes.
+ * This function translates the block number into path in that tree -
+ * return value is the path length and @offsets[n] is the offset of
+ * pointer to (n+1)th node in the nth one. If @i_block is out of range
+ * (negative or too large) warning is printed and zero returned.
+ *
+ * Note: function doesn't find node addresses, so no IO is needed. All
+ * we need to know is the capacity of indirect blocks (taken from the
+ * inode->i_sb).
+ */
+
+/*
+ * Portability note: the last comparison (check that we fit into triple
+ * indirect block) is spelled differently, because otherwise on an
+ * architecture with 32-bit longs and 8Kb pages we might get into trouble
+ * if our filesystem had 8Kb blocks. We might use long long, but that would
+ * kill us on x86. Oh, well, at least the sign propagation does not matter -
+ * i_block would have to be negative in the very beginning, so we would not
+ * get there at all.
+ */
+
+static int ext4_block_to_path(struct inode *inode,
+ ext4_lblk_t i_block,
+ ext4_lblk_t offsets[4], int *boundary)
+{
+ int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+ int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
+ const long direct_blocks = EXT4_NDIR_BLOCKS,
+ indirect_blocks = ptrs,
+ double_blocks = (1 << (ptrs_bits * 2));
+ int n = 0;
+ int final = 0;
+
+ if (i_block < direct_blocks) {
+ offsets[n++] = i_block;
+ final = direct_blocks;
+ } else if ((i_block -= direct_blocks) < indirect_blocks) {
+ offsets[n++] = EXT4_IND_BLOCK;
+ offsets[n++] = i_block;
+ final = ptrs;
+ } else if ((i_block -= indirect_blocks) < double_blocks) {
+ offsets[n++] = EXT4_DIND_BLOCK;
+ offsets[n++] = i_block >> ptrs_bits;
+ offsets[n++] = i_block & (ptrs - 1);
+ final = ptrs;
+ } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
+ offsets[n++] = EXT4_TIND_BLOCK;
+ offsets[n++] = i_block >> (ptrs_bits * 2);
+ offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
+ offsets[n++] = i_block & (ptrs - 1);
+ final = ptrs;
+ } else {
+ ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
+ i_block + direct_blocks +
+ indirect_blocks + double_blocks, inode->i_ino);
+ }
+ if (boundary)
+ *boundary = final - 1 - (i_block & (ptrs - 1));
+ return n;
+}
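
Since ext4_block_to_path() is pure arithmetic it can be tried out directly in userspace. The sketch below mirrors it with ptrs_bits = 10 (4K blocks, 1024 pointers per indirect block) and the i_data slot numbers 12/13/14 for the single, double and triple indirect pointers; the sample block numbers are arbitrary.

/* Standalone illustration of the block-number -> path computation. */
#include <stdio.h>
#include <stdint.h>

#define NDIR_BLOCKS 12
#define IND_BLOCK   12
#define DIND_BLOCK  13
#define TIND_BLOCK  14

static int block_to_path(uint64_t i_block, unsigned ptrs_bits, uint32_t off[4])
{
	const uint64_t ptrs = 1ULL << ptrs_bits;
	const uint64_t dbl  = 1ULL << (ptrs_bits * 2);
	int n = 0;

	if (i_block < NDIR_BLOCKS) {
		off[n++] = i_block;
	} else if ((i_block -= NDIR_BLOCKS) < ptrs) {
		off[n++] = IND_BLOCK;
		off[n++] = i_block;
	} else if ((i_block -= ptrs) < dbl) {
		off[n++] = DIND_BLOCK;
		off[n++] = i_block >> ptrs_bits;
		off[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= dbl) >> (ptrs_bits * 2)) < ptrs) {
		off[n++] = TIND_BLOCK;
		off[n++] = i_block >> (ptrs_bits * 2);
		off[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		off[n++] = i_block & (ptrs - 1);
	}
	return n;			/* 0 means "out of range" */
}

int main(void)
{
	uint64_t samples[] = { 5, 12, 1035, 1049600 };
	uint32_t off[4];

	for (unsigned s = 0; s < 4; s++) {
		int n = block_to_path(samples[s], 10, off);

		printf("block %llu -> depth %d:",
		       (unsigned long long)samples[s], n);
		for (int i = 0; i < n; i++)
			printf(" %u", off[i]);
		printf("\n");
	}
	return 0;
}
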
+
+/**
+ * ext4_get_branch - read the chain of indirect blocks leading to data
+ * @inode: inode in question
+ * @depth: depth of the chain (1 - direct pointer, etc.)
+ * @offsets: offsets of pointers in inode/indirect blocks
+ * @chain: place to store the result
+ * @err: here we store the error value
+ *
+ * Function fills the array of triples <key, p, bh> and returns %NULL
+ * if everything went OK or the pointer to the last filled triple
+ * (incomplete one) otherwise. Upon the return chain[i].key contains
+ * the number of (i+1)-th block in the chain (as it is stored in memory,
+ * i.e. little-endian 32-bit), chain[i].p contains the address of that
+ * number (it points into struct inode for i==0 and into the bh->b_data
+ * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
+ * block for i>0 and NULL for i==0. In other words, it holds the block
+ * numbers of the chain, addresses they were taken from (and where we can
+ * verify that chain did not change) and buffer_heads hosting these
+ * numbers.
+ *
+ * Function stops when it stumbles upon zero pointer (absent block)
+ * (pointer to last triple returned, *@err == 0)
+ * or when it gets an IO error reading an indirect block
+ * (ditto, *@err == -EIO)
+ * or when it reads all @depth-1 indirect blocks successfully and finds
+ * the whole chain, all way to the data (returns %NULL, *err == 0).
+ *
+ * Need to be called with
+ * down_read(&EXT4_I(inode)->i_data_sem)
+ */
+static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ ext4_lblk_t *offsets,
+ Indirect chain[4], int *err)
+{
+ struct super_block *sb = inode->i_sb;
+ Indirect *p = chain;
+ struct buffer_head *bh;
+
+ *err = 0;
+ /* i_data is not going away, no lock needed */
+ add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
+ if (!p->key)
+ goto no_block;
+ while (--depth) {
+ bh = sb_getblk(sb, le32_to_cpu(p->key));
+ if (unlikely(!bh))
+ goto failure;
+
+ if (!bh_uptodate_or_lock(bh)) {
+ if (bh_submit_read(bh) < 0) {
+ put_bh(bh);
+ goto failure;
+ }
+ /* validate block references */
+ if (ext4_check_indirect_blockref(inode, bh)) {
+ put_bh(bh);
+ goto failure;
+ }
+ }
+
+ add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
+ /* Reader: end */
+ if (!p->key)
+ goto no_block;
+ }
+ return NULL;
+
+failure:
+ *err = -EIO;
+no_block:
+ return p;
+}
+
+/**
+ * ext4_find_near - find a place for allocation with sufficient locality
+ * @inode: owner
+ * @ind: descriptor of indirect block.
+ *
+ * This function returns the preferred place for block allocation.
+ * It is used when heuristic for sequential allocation fails.
+ * Rules are:
+ * + if there is a block to the left of our position - allocate near it.
+ * + if pointer will live in indirect block - allocate near that block.
+ * + if pointer will live in inode - allocate in the same
+ * cylinder group.
+ *
+ * In the latter case we colour the starting block by the caller's PID to
+ * prevent it from clashing with concurrent allocations for a different inode
+ * in the same block group. The PID is used here so that functionally related
+ * files will be close-by on-disk.
+ *
+ * Caller must make sure that @ind is valid and will stay that way.
+ */
+static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
+ __le32 *p;
+
+ /* Try to find previous block */
+ for (p = ind->p - 1; p >= start; p--) {
+ if (*p)
+ return le32_to_cpu(*p);
+ }
+
+ /* No such thing, so let's try location of indirect block */
+ if (ind->bh)
+ return ind->bh->b_blocknr;
+
+ /*
+ * It is going to be referred to from the inode itself? OK, just put it
+ * into the same cylinder group then.
+ */
+ return ext4_inode_to_goal_block(inode);
+}
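
The backwards scan in ext4_find_near() is equally self-contained. A minimal index-based rendition, assuming an in-memory array of block numbers in place of the __le32 pointers in the inode or indirect block:

/* Nearest already-mapped block to the left, else the indirect block. */
#include <stdio.h>
#include <stdint.h>

static uint32_t find_near(const uint32_t *map, int slot, uint32_t ind_blocknr)
{
	for (int i = slot - 1; i >= 0; i--)
		if (map[i])
			return map[i];	/* block just to our left */
	return ind_blocknr;		/* else: near the indirect block itself */
}

int main(void)
{
	/* one indirect block's pointers; we are about to fill slot 6 */
	uint32_t map[8] = { 5001, 5002, 0, 0, 5007, 0, 0, 0 };

	printf("allocation hint: %u\n", find_near(map, 6, 9000));
	return 0;
}
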
+
+/**
+ * ext4_find_goal - find a preferred place for allocation.
+ * @inode: owner
+ * @block: block we want
+ * @partial: pointer to the last triple within a chain
+ *
+ * Normally this function finds the preferred place for block allocation
+ * and returns it.
+ * Because this is only used for non-extent files, we limit the block nr
+ * to 32 bits.
+ */
+static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
+ Indirect *partial)
+{
+ ext4_fsblk_t goal;
+
+ /*
+ * XXX need to get goal block from mballoc's data structures
+ */
+
+ goal = ext4_find_near(inode, partial);
+ goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
+ return goal;
+}
+
+/**
+ * ext4_blks_to_allocate - Look up the block map and count the number
+ * of direct blocks that need to be allocated for the given branch.
+ *
+ * @branch: chain of indirect blocks
+ * @k: number of blocks needed for indirect blocks
+ * @blks: number of data blocks to be mapped.
+ * @blocks_to_boundary: the offset in the indirect block
+ *
+ * return the total number of blocks to be allocated, including the
+ * direct and indirect blocks.
+ */
+static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
+ int blocks_to_boundary)
+{
+ unsigned int count = 0;
+
+ /*
+ * Simple case: the [t,d]indirect block(s) have not been allocated yet,
+ * so clearly no blocks on that path have been allocated either
+ */
+ if (k > 0) {
+ /* right now we don't handle cross boundary allocation */
+ if (blks < blocks_to_boundary + 1)
+ count += blks;
+ else
+ count += blocks_to_boundary + 1;
+ return count;
+ }
+
+ count++;
+ while (count < blks && count <= blocks_to_boundary &&
+ le32_to_cpu(*(branch[0].p + count)) == 0) {
+ count++;
+ }
+ return count;
+}
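
ext4_blks_to_allocate() only counts; it never touches disk. The same counting logic as a standalone function over host-order integers (the kernel reads __le32 slots via le32_to_cpu()):

/* Count contiguous direct blocks to allocate, bounded by the indirect block. */
#include <stdio.h>
#include <stdint.h>

static unsigned blks_to_allocate(const uint32_t *p, int k, unsigned blks,
				 int blocks_to_boundary)
{
	unsigned count = 0;

	if (k > 0)	/* missing indirect block(s): no cross-boundary alloc */
		return blks < (unsigned)blocks_to_boundary + 1 ?
		       blks : (unsigned)blocks_to_boundary + 1;

	count++;
	while (count < blks && count <= (unsigned)blocks_to_boundary &&
	       p[count] == 0)
		count++;
	return count;
}

int main(void)
{
	/* slots after the target block inside one indirect block:
	 * 0 = not yet allocated, nonzero = already mapped
	 */
	uint32_t slots[8] = { 0, 0, 0, 777, 0, 0, 0, 0 };

	printf("allocate %u block(s)\n", blks_to_allocate(slots, 0, 6, 7));
	return 0;
}
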
+
+/**
+ * ext4_alloc_blocks: multiple allocate blocks needed for a branch
+ * @handle: handle for this transaction
+ * @inode: inode which needs allocated blocks
+ * @iblock: the logical block to start allocated at
+ * @goal: preferred physical block of allocation
+ * @indirect_blks: the number of blocks that need to be allocated for
+ * indirect blocks
+ * @blks: number of desired blocks
+ * @new_blocks: on return it will store the new block numbers for
+ * the indirect blocks(if needed) and the first direct block,
+ * @err: on return it will store the error code
+ *
+ * This function will return the number of blocks allocated as
+ * requested by the passed-in parameters.
+ */
+static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
+ ext4_lblk_t iblock, ext4_fsblk_t goal,
+ int indirect_blks, int blks,
+ ext4_fsblk_t new_blocks[4], int *err)
+{
+ struct ext4_allocation_request ar;
+ int target, i;
+ unsigned long count = 0, blk_allocated = 0;
+ int index = 0;
+ ext4_fsblk_t current_block = 0;
+ int ret = 0;
+
+ /*
+ * Here we try to allocate the requested multiple blocks at once,
+ * on a best-effort basis.
+ * To build a branch, we should allocate blocks for
+ * the indirect blocks (if not allocated yet) and at least
+ * the first direct block of this branch. That's the
+ * minimum number of blocks we need to allocate (required).
+ */
+ /* first we try to allocate the indirect blocks */
+ target = indirect_blks;
+ while (target > 0) {
+ count = target;
+ /* allocating blocks for indirect blocks and direct blocks */
+ current_block = ext4_new_meta_blocks(handle, inode, goal,
+ 0, &count, err);
+ if (*err)
+ goto failed_out;
+
+ if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
+ EXT4_ERROR_INODE(inode,
+ "current_block %llu + count %lu > %d!",
+ current_block, count,
+ EXT4_MAX_BLOCK_FILE_PHYS);
+ *err = -EIO;
+ goto failed_out;
+ }
+
+ target -= count;
+ /* allocate blocks for indirect blocks */
+ while (index < indirect_blks && count) {
+ new_blocks[index++] = current_block++;
+ count--;
+ }
+ if (count > 0) {
+ /*
+ * save the new block number
+ * for the first direct block
+ */
+ new_blocks[index] = current_block;
+ printk(KERN_INFO "%s returned more blocks than "
+ "requested\n", __func__);
+ WARN_ON(1);
+ break;
+ }
+ }
+
+ target = blks - count;
+ blk_allocated = count;
+ if (!target)
+ goto allocated;
+ /* Now allocate data blocks */
+ memset(&ar, 0, sizeof(ar));
+ ar.inode = inode;
+ ar.goal = goal;
+ ar.len = target;
+ ar.logical = iblock;
+ if (S_ISREG(inode->i_mode))
+ /* enable in-core preallocation only for regular files */
+ ar.flags = EXT4_MB_HINT_DATA;
+
+ current_block = ext4_mb_new_blocks(handle, &ar, err);
+ if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
+ EXT4_ERROR_INODE(inode,
+ "current_block %llu + ar.len %d > %d!",
+ current_block, ar.len,
+ EXT4_MAX_BLOCK_FILE_PHYS);
+ *err = -EIO;
+ goto failed_out;
+ }
+
+ if (*err && (target == blks)) {
+ /*
+ * if the allocation failed and we didn't allocate
+ * any blocks before
+ */
+ goto failed_out;
+ }
+ if (!*err) {
+ if (target == blks) {
+ /*
+ * save the new block number
+ * for the first direct block
+ */
+ new_blocks[index] = current_block;
+ }
+ blk_allocated += ar.len;
+ }
+allocated:
+ /* total number of blocks allocated for direct blocks */
+ ret = blk_allocated;
+ *err = 0;
+ return ret;
+failed_out:
+ for (i = 0; i < index; i++)
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
+ return ret;
+}
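+
+/*
+ * To illustrate how new_blocks[] is filled (hypothetical values): for
+ * indirect_blks == 2 and blks == 3, new_blocks[0] and new_blocks[1]
+ * receive the two metadata blocks, new_blocks[2] receives the first
+ * direct block, and the return value is the number of direct blocks
+ * that were actually allocated.
+ */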
+
+/**
+ * ext4_alloc_branch - allocate and set up a chain of blocks.
+ * @handle: handle for this transaction
+ * @inode: owner
+ * @indirect_blks: number of allocated indirect blocks
+ * @blks: number of allocated direct blocks
+ * @goal: preferred place for allocation
+ * @offsets: offsets (in the blocks) to store the pointers to next.
+ * @branch: place to store the chain in.
+ *
+ * This function allocates blocks, zeroes out all but the last one,
+ * links them into chain and (if we are synchronous) writes them to disk.
+ * In other words, it prepares a branch that can be spliced onto the
+ * inode. It stores the information about that chain in the branch[], in
+ * the same format as ext4_get_branch() would do. We are calling it after
+ * we have read the existing part of the chain, and partial points to the last
+ * triple of that (one with zero ->key). Upon the exit we have the same
+ * picture as after the successful ext4_get_block(), except that in one
+ * place chain is disconnected - *branch->p is still zero (we did not
+ * set the last link), but branch->key contains the number that should
+ * be placed into *branch->p to fill that gap.
+ *
+ * If allocation fails we free all blocks we've allocated (and forget
+ * their buffer_heads) and return the error value from the failed
+ * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
+ * as described above and return 0.
+ */
+static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
+ ext4_lblk_t iblock, int indirect_blks,
+ int *blks, ext4_fsblk_t goal,
+ ext4_lblk_t *offsets, Indirect *branch)
+{
+ int blocksize = inode->i_sb->s_blocksize;
+ int i, n = 0;
+ int err = 0;
+ struct buffer_head *bh;
+ int num;
+ ext4_fsblk_t new_blocks[4];
+ ext4_fsblk_t current_block;
+
+ num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
+ *blks, new_blocks, &err);
+ if (err)
+ return err;
+
+ branch[0].key = cpu_to_le32(new_blocks[0]);
+ /*
+ * metadata blocks and data blocks are allocated.
+ */
+ for (n = 1; n <= indirect_blks; n++) {
+ /*
+ * Get a buffer_head for the parent block, zero it out
+ * and set the pointer to the new one, then send the
+ * parent to disk.
+ */
+ bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+ if (unlikely(!bh)) {
+ err = -EIO;
+ goto failed;
+ }
+
+ branch[n].bh = bh;
+ lock_buffer(bh);
+ BUFFER_TRACE(bh, "call get_create_access");
+ err = ext4_journal_get_create_access(handle, bh);
+ if (err) {
+ /* Don't brelse(bh) here; it's done in
+ * ext4_journal_forget() below */
+ unlock_buffer(bh);
+ goto failed;
+ }
+
+ memset(bh->b_data, 0, blocksize);
+ branch[n].p = (__le32 *) bh->b_data + offsets[n];
+ branch[n].key = cpu_to_le32(new_blocks[n]);
+ *branch[n].p = branch[n].key;
+ if (n == indirect_blks) {
+ current_block = new_blocks[n];
+ /*
+ * End of chain; update the last new metablock of
+ * the chain to point to the newly allocated
+ * data block numbers
+ */
+ for (i = 1; i < num; i++)
+ *(branch[n].p + i) = cpu_to_le32(++current_block);
+ }
+ BUFFER_TRACE(bh, "marking uptodate");
+ set_buffer_uptodate(bh);
+ unlock_buffer(bh);
+
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (err)
+ goto failed;
+ }
+ *blks = num;
+ return err;
+failed:
+ /* Allocation failed, free what we already allocated */
+ ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
+ for (i = 1; i <= n ; i++) {
+ /*
+ * branch[i].bh is newly allocated, so there is no
+ * need to revoke the block, which is why we don't
+ * need to set EXT4_FREE_BLOCKS_METADATA.
+ */
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
+ EXT4_FREE_BLOCKS_FORGET);
+ }
+ for (i = n+1; i < indirect_blks; i++)
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
+
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
+
+ return err;
+}
+
+/**
+ * ext4_splice_branch - splice the allocated branch onto inode.
+ * @handle: handle for this transaction
+ * @inode: owner
+ * @block: (logical) number of block we are adding
+ * @chain: chain of indirect blocks (with a missing link - see
+ * ext4_alloc_branch)
+ * @where: location of missing link
+ * @num: number of indirect blocks we are adding
+ * @blks: number of direct blocks we are adding
+ *
+ * This function fills the missing link and does all housekeeping needed in
+ * inode (->i_blocks, etc.). In case of success we end up with the full
+ * chain to new block and return 0.
+ */
+static int ext4_splice_branch(handle_t *handle, struct inode *inode,
+ ext4_lblk_t block, Indirect *where, int num,
+ int blks)
+{
+ int i;
+ int err = 0;
+ ext4_fsblk_t current_block;
+
+ /*
+ * If we're splicing into a [td]indirect block (as opposed to the
+ * inode) then we need to get write access to the [td]indirect block
+ * before the splice.
+ */
+ if (where->bh) {
+ BUFFER_TRACE(where->bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, where->bh);
+ if (err)
+ goto err_out;
+ }
+ /* That's it */
+
+ *where->p = where->key;
+
+ /*
+ * Update the host buffer_head or inode to point to the
+ * just-allocated direct blocks
+ */
+ if (num == 0 && blks > 1) {
+ current_block = le32_to_cpu(where->key) + 1;
+ for (i = 1; i < blks; i++)
+ *(where->p + i) = cpu_to_le32(current_block++);
+ }
+
+ /* We are done with atomic stuff, now do the rest of housekeeping */
+ /* had we spliced it onto indirect block? */
+ if (where->bh) {
+ /*
+ * If we spliced it onto an indirect block, we haven't
+ * altered the inode. Note however that if it is being spliced
+ * onto an indirect block at the very end of the file (the
+ * file is growing) then we *will* alter the inode to reflect
+ * the new i_size. But that is not done here - it is done in
+ * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
+ */
+ jbd_debug(5, "splicing indirect only\n");
+ BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, inode, where->bh);
+ if (err)
+ goto err_out;
+ } else {
+ /*
+ * OK, we spliced it into the inode itself on a direct block.
+ */
+ ext4_mark_inode_dirty(handle, inode);
+ jbd_debug(5, "splicing direct\n");
+ }
+ return err;
+
+err_out:
+ for (i = 1; i <= num; i++) {
+ /*
+ * branch[i].bh is newly allocated, so there is no
+ * need to revoke the block, which is why we don't
+ * need to set EXT4_FREE_BLOCKS_METADATA.
+ */
+ ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
+ EXT4_FREE_BLOCKS_FORGET);
+ }
+ ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
+ blks, 0);
+
+ return err;
+}
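+
+/*
+ * For example (hypothetical numbers): a call with num == 0, blks == 4
+ * and where->key referring to block 1000 fills where->p[1..3] with
+ * blocks 1001, 1002 and 1003, since the direct blocks were allocated
+ * contiguously right after the first one.
+ */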
+
+/*
+ * The ext4_ind_map_blocks() function handles non-extents inodes
+ * (i.e., using the traditional indirect/double-indirect i_blocks
+ * scheme) for ext4_map_blocks().
+ *
+ * Allocation strategy is simple: if we have to allocate something, we will
+ * have to go the whole way to leaf. So let's do it before attaching anything
+ * to tree, set linkage between the newborn blocks, write them if sync is
+ * required, recheck the path, free and repeat if check fails, otherwise
+ * set the last missing link (that will protect us from any truncate-generated
+ * removals - all blocks on the path are immune now) and possibly force the
+ * write on the parent block.
+ * That has a nice additional property: no special recovery from the failed
+ * allocations is needed - we simply release blocks and do not touch anything
+ * reachable from inode.
+ *
+ * `handle' can be NULL if create == 0.
+ *
+ * return > 0, # of blocks mapped or allocated.
+ * return = 0, if plain lookup failed.
+ * return < 0, error case.
+ *
+ * The ext4_ind_map_blocks() function should be called with
+ * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
+ * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
+ * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
+ * blocks.
+ */
+int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map,
+ int flags)
+{
+ int err = -EIO;
+ ext4_lblk_t offsets[4];
+ Indirect chain[4];
+ Indirect *partial;
+ ext4_fsblk_t goal;
+ int indirect_blks;
+ int blocks_to_boundary = 0;
+ int depth;
+ int count = 0;
+ ext4_fsblk_t first_block = 0;
+
+ trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
+ J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
+ J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
+ depth = ext4_block_to_path(inode, map->m_lblk, offsets,
+ &blocks_to_boundary);
+
+ if (depth == 0)
+ goto out;
+
+ partial = ext4_get_branch(inode, depth, offsets, chain, &err);
+
+ /* Simplest case - block found, no allocation needed */
+ if (!partial) {
+ first_block = le32_to_cpu(chain[depth - 1].key);
+ count++;
+ /* map more blocks */
+ while (count < map->m_len && count <= blocks_to_boundary) {
+ ext4_fsblk_t blk;
+
+ blk = le32_to_cpu(*(chain[depth-1].p + count));
+
+ if (blk == first_block + count)
+ count++;
+ else
+ break;
+ }
+ goto got_it;
+ }
+
+ /* Next simple case - plain lookup or failed read of indirect block */
+ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
+ goto cleanup;
+
+ /*
+ * Okay, we need to do block allocation.
+ */
+ goal = ext4_find_goal(inode, map->m_lblk, partial);
+
+ /* the number of blocks need to allocate for [d,t]indirect blocks */
+ indirect_blks = (chain + depth) - partial - 1;
+
+ /*
+ * Next look up the indirect map to count the total number of
+ * direct blocks to allocate for this branch.
+ */
+ count = ext4_blks_to_allocate(partial, indirect_blks,
+ map->m_len, blocks_to_boundary);
+ /*
+ * Block out ext4_truncate while we alter the tree
+ */
+ err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
+ &count, goal,
+ offsets + (partial - chain), partial);
+
+ /*
+ * The ext4_splice_branch call will free and forget any buffers
+ * on the new chain if there is a failure, but that risks using
+ * up transaction credits, especially for bitmaps where the
+ * credits cannot be returned. Can we handle this somehow? We
+ * may need to return -EAGAIN upwards in the worst case. --sct
+ */
+ if (!err)
+ err = ext4_splice_branch(handle, inode, map->m_lblk,
+ partial, indirect_blks, count);
+ if (err)
+ goto cleanup;
+
+ map->m_flags |= EXT4_MAP_NEW;
+
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+got_it:
+ map->m_flags |= EXT4_MAP_MAPPED;
+ map->m_pblk = le32_to_cpu(chain[depth-1].key);
+ map->m_len = count;
+ if (count > blocks_to_boundary)
+ map->m_flags |= EXT4_MAP_BOUNDARY;
+ err = count;
+ /* Clean up and exit */
+ partial = chain + depth - 1; /* the whole chain */
+cleanup:
+ while (partial > chain) {
+ BUFFER_TRACE(partial->bh, "call brelse");
+ brelse(partial->bh);
+ partial--;
+ }
+out:
+ trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
+ map->m_pblk, map->m_len, err);
+ return err;
+}
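+
+/*
+ * A worked example of the lookup path (assuming a 4KB block size):
+ * for map->m_lblk == 20, ext4_block_to_path() yields depth == 2 with
+ * offsets == { EXT4_IND_BLOCK, 8 } and blocks_to_boundary == 1015,
+ * so a single call can map at most 1016 blocks before the indirect
+ * block runs out of slots.
+ */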
+
+/*
+ * O_DIRECT for ext3 (or indirect map) based files
+ *
+ * If the O_DIRECT write will extend the file then add this inode to the
+ * orphan list. So recovery will truncate it back to the original size
+ * if the machine crashes during the write.
+ *
+ * If the O_DIRECT write is instantiating holes inside i_size and the machine
+ * crashes then stale disk data _may_ be exposed inside the file. But current
+ * VFS code falls back to the buffered path in that case, so we are safe.
+ */
+ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
+ const struct iovec *iov, loff_t offset,
+ unsigned long nr_segs)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ handle_t *handle;
+ ssize_t ret;
+ int orphan = 0;
+ size_t count = iov_length(iov, nr_segs);
+ int retries = 0;
+
+ if (rw == WRITE) {
+ loff_t final_size = offset + count;
+
+ if (final_size > inode->i_size) {
+ /* Credits for sb + inode write */
+ handle = ext4_journal_start(inode, 2);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out;
+ }
+ ret = ext4_orphan_add(handle, inode);
+ if (ret) {
+ ext4_journal_stop(handle);
+ goto out;
+ }
+ orphan = 1;
+ ei->i_disksize = inode->i_size;
+ ext4_journal_stop(handle);
+ }
+ }
+
+retry:
+ if (rw == READ && ext4_should_dioread_nolock(inode))
+ ret = __blockdev_direct_IO(rw, iocb, inode,
+ inode->i_sb->s_bdev, iov,
+ offset, nr_segs,
+ ext4_get_block, NULL, NULL, 0);
+ else {
+ ret = blockdev_direct_IO(rw, iocb, inode, iov,
+ offset, nr_segs, ext4_get_block);
+
+ if (unlikely((rw & WRITE) && ret < 0)) {
+ loff_t isize = i_size_read(inode);
+ loff_t end = offset + iov_length(iov, nr_segs);
+
+ if (end > isize)
+ ext4_truncate_failed_write(inode);
+ }
+ }
+ if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
+
+ if (orphan) {
+ int err;
+
+ /* Credits for sb + inode write */
+ handle = ext4_journal_start(inode, 2);
+ if (IS_ERR(handle)) {
+ /* This is really bad luck. We've written the data
+ * but cannot extend i_size. Bail out and pretend
+ * the write failed... */
+ ret = PTR_ERR(handle);
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+
+ goto out;
+ }
+ if (inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+ if (ret > 0) {
+ loff_t end = offset + ret;
+ if (end > inode->i_size) {
+ ei->i_disksize = end;
+ i_size_write(inode, end);
+ /*
+ * We're going to return a positive `ret'
+ * here due to non-zero-length I/O, so there's
+ * no way of reporting error returns from
+ * ext4_mark_inode_dirty() to userspace. So
+ * ignore it.
+ */
+ ext4_mark_inode_dirty(handle, inode);
+ }
+ }
+ err = ext4_journal_stop(handle);
+ if (ret == 0)
+ ret = err;
+ }
+out:
+ return ret;
+}
+
+/*
+ * Calculate the number of metadata blocks that need to be reserved
+ * to allocate a new block at @lblock for a non-extent-based file
+ */
+int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
+ int blk_bits;
+
+ if (lblock < EXT4_NDIR_BLOCKS)
+ return 0;
+
+ lblock -= EXT4_NDIR_BLOCKS;
+
+ if (ei->i_da_metadata_calc_len &&
+ (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
+ ei->i_da_metadata_calc_len++;
+ return 0;
+ }
+ ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
+ ei->i_da_metadata_calc_len = 1;
+ blk_bits = order_base_2(lblock);
+ return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
+}
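+
+/*
+ * Example (assuming a 4KB block size, i.e. EXT4_ADDR_PER_BLOCK_BITS
+ * is 10): for lblock == 2012 we get 2012 - EXT4_NDIR_BLOCKS == 2000,
+ * order_base_2(2000) == 11, and 11 / 10 + 1 == 2, i.e. reserve one
+ * indirect and one double indirect block in the worst case.
+ */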
+
+int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
+{
+ int indirects;
+
+ /* if nrblocks are contiguous */
+ if (chunk) {
+ /*
+ * With N contiguous data blocks, we need at most
+ * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+ * 2 dindirect blocks, and 1 tindirect block
+ */
+ return DIV_ROUND_UP(nrblocks,
+ EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
+ }
+ /*
+ * if nrblocks are not contiguous then, in the worst case, each block
+ * touches an indirect block, each indirect block touches a double
+ * indirect block, plus a triple indirect block
+ */
+ indirects = nrblocks * 2 + 1;
+ return indirects;
+}
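+
+/*
+ * Example (assuming a 4KB block size, i.e. EXT4_ADDR_PER_BLOCK ==
+ * 1024): a contiguous chunk of 2048 blocks is charged at most
+ * DIV_ROUND_UP(2048, 1024) + 4 == 6 metadata blocks, while 3
+ * non-contiguous blocks are charged 3 * 2 + 1 == 7 indirect blocks.
+ */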
+
+/*
+ * Truncate transactions can be complex and absolutely huge. So we need to
+ * be able to restart the transaction at a convenient checkpoint to make
+ * sure we don't overflow the journal.
+ *
+ * start_transaction gets us a new handle for a truncate transaction,
+ * and extend_transaction tries to extend the existing one a bit. If
+ * extend fails, we need to propagate the failure up and restart the
+ * transaction in the top-level truncate loop. --sct
+ */
+static handle_t *start_transaction(struct inode *inode)
+{
+ handle_t *result;
+
+ result = ext4_journal_start(inode, ext4_blocks_for_truncate(inode));
+ if (!IS_ERR(result))
+ return result;
+
+ ext4_std_error(inode->i_sb, PTR_ERR(result));
+ return result;
+}
+
+/*
+ * Try to extend this transaction for the purposes of truncation.
+ *
+ * Returns 0 if we managed to create more room. If we can't create more
+ * room and the transaction must be restarted, we return 1.
+ */
+static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
+{
+ if (!ext4_handle_valid(handle))
+ return 0;
+ if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
+ return 0;
+ if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
+ return 0;
+ return 1;
+}
+
+/*
+ * Probably it should be a library function... search for first non-zero word
+ * or memcmp with zero_page, whatever is better for particular architecture.
+ * Linus?
+ */
+static inline int all_zeroes(__le32 *p, __le32 *q)
+{
+ while (p < q)
+ if (*p++)
+ return 0;
+ return 1;
+}
+
+/**
+ * ext4_find_shared - find the indirect blocks for partial truncation.
+ * @inode: inode in question
+ * @depth: depth of the affected branch
+ * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
+ * @chain: place to store the pointers to partial indirect blocks
+ * @top: place to the (detached) top of branch
+ *
+ * This is a helper function used by ext4_truncate().
+ *
+ * When we do truncate() we may have to clean the ends of several
+ * indirect blocks but leave the blocks themselves alive. A block is
+ * partially truncated if some data below the new i_size is referred
+ * to from it (and it is on the path to the first completely truncated
+ * data block, indeed). We have to free the top of that path along
+ * with everything to the right of the path. Since no allocation
+ * past the truncation point is possible until ext4_truncate()
+ * finishes, we may safely do the latter, but top of branch may
+ * require special attention - pageout below the truncation point
+ * might try to populate it.
+ *
+ * We atomically detach the top of branch from the tree, store the
+ * block number of its root in *@top, pointers to buffer_heads of
+ * partially truncated blocks - in @chain[].bh and pointers to
+ * their last elements that should not be removed - in
+ * @chain[].p. Return value is the pointer to last filled element
+ * of @chain.
+ *
+ * It is left to the caller to do the actual freeing of the subtrees:
+ * a) free the subtree starting from *@top
+ * b) free the subtrees whose roots are stored in
+ * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
+ * c) free the subtrees growing from the inode past @chain[0].
+ * (no partially truncated stuff there). */
+
+static Indirect *ext4_find_shared(struct inode *inode, int depth,
+ ext4_lblk_t offsets[4], Indirect chain[4],
+ __le32 *top)
+{
+ Indirect *partial, *p;
+ int k, err;
+
+ *top = 0;
+ /* Make k index the deepest non-null offset + 1 */
+ for (k = depth; k > 1 && !offsets[k-1]; k--)
+ ;
+ partial = ext4_get_branch(inode, k, offsets, chain, &err);
+ /* Writer: pointers */
+ if (!partial)
+ partial = chain + k-1;
+ /*
+ * If the branch acquired a continuation since we last looked at it -
+ * fine, it should all survive and the (new) top doesn't belong to us.
+ */
+ if (!partial->key && *partial->p)
+ /* Writer: end */
+ goto no_top;
+ for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
+ ;
+ /*
+ * OK, we've found the last block that must survive. The rest of our
+ * branch should be detached before unlocking. However, if that rest
+ * of branch is all ours and does not grow immediately from the inode
+ * it's easier to cheat and just decrement partial->p.
+ */
+ if (p == chain + k - 1 && p > chain) {
+ p->p--;
+ } else {
+ *top = *p->p;
+ /* Nope, don't do this in ext4. Must leave the tree intact */
+#if 0
+ *p->p = 0;
+#endif
+ }
+ /* Writer: end */
+
+ while (partial > p) {
+ brelse(partial->bh);
+ partial--;
+ }
+no_top:
+ return partial;
+}
+
+/*
+ * Zero a number of block pointers in either an inode or an indirect block.
+ * If we restart the transaction we must again get write access to the
+ * indirect block for further modification.
+ *
+ * We release `count' blocks on disk, but (last - first) may be greater
+ * than `count' because there can be holes in there.
+ *
+ * Return 0 on success, 1 on invalid block range
+ * and < 0 on fatal error.
+ */
+static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh,
+ ext4_fsblk_t block_to_free,
+ unsigned long count, __le32 *first,
+ __le32 *last)
+{
+ __le32 *p;
+ int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
+ int err;
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ flags |= EXT4_FREE_BLOCKS_METADATA;
+
+ if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
+ count)) {
+ EXT4_ERROR_INODE(inode, "attempt to clear invalid "
+ "blocks %llu len %lu",
+ (unsigned long long) block_to_free, count);
+ return 1;
+ }
+
+ if (try_to_extend_transaction(handle, inode)) {
+ if (bh) {
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (unlikely(err))
+ goto out_err;
+ }
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err))
+ goto out_err;
+ err = ext4_truncate_restart_trans(handle, inode,
+ ext4_blocks_for_truncate(inode));
+ if (unlikely(err))
+ goto out_err;
+ if (bh) {
+ BUFFER_TRACE(bh, "retaking write access");
+ err = ext4_journal_get_write_access(handle, bh);
+ if (unlikely(err))
+ goto out_err;
+ }
+ }
+
+ for (p = first; p < last; p++)
+ *p = 0;
+
+ ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
+ return 0;
+out_err:
+ ext4_std_error(inode->i_sb, err);
+ return err;
+}
+
+/**
+ * ext4_free_data - free a list of data blocks
+ * @handle: handle for this transaction
+ * @inode: inode we are dealing with
+ * @this_bh: indirect buffer_head which contains *@first and *@last
+ * @first: array of block numbers
+ * @last: points immediately past the end of array
+ *
+ * We are freeing all blocks referred from that array (numbers are stored as
+ * little-endian 32-bit) and updating @inode->i_blocks appropriately.
+ *
+ * We accumulate contiguous runs of blocks to free. Conveniently, if these
+ * blocks are contiguous then releasing them at one time will only affect one
+ * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
+ * actually use a lot of journal space.
+ *
+ * @this_bh will be %NULL if @first and @last point into the inode's direct
+ * block pointers.
+ */
+static void ext4_free_data(handle_t *handle, struct inode *inode,
+ struct buffer_head *this_bh,
+ __le32 *first, __le32 *last)
+{
+ ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
+ unsigned long count = 0; /* Number of blocks in the run */
+ __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
+ corresponding to
+ block_to_free */
+ ext4_fsblk_t nr; /* Current block # */
+ __le32 *p; /* Pointer into inode/ind
+ for current block */
+ int err = 0;
+
+ if (this_bh) { /* For indirect block */
+ BUFFER_TRACE(this_bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, this_bh);
+ /* Important: if we can't update the indirect pointers
+ * to the blocks, we can't free them. */
+ if (err)
+ return;
+ }
+
+ for (p = first; p < last; p++) {
+ nr = le32_to_cpu(*p);
+ if (nr) {
+ /* accumulate blocks to free if they're contiguous */
+ if (count == 0) {
+ block_to_free = nr;
+ block_to_free_p = p;
+ count = 1;
+ } else if (nr == block_to_free + count) {
+ count++;
+ } else {
+ err = ext4_clear_blocks(handle, inode, this_bh,
+ block_to_free, count,
+ block_to_free_p, p);
+ if (err)
+ break;
+ block_to_free = nr;
+ block_to_free_p = p;
+ count = 1;
+ }
+ }
+ }
+
+ if (!err && count > 0)
+ err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
+ count, block_to_free_p, p);
+ if (err < 0)
+ /* fatal error */
+ return;
+
+ if (this_bh) {
+ BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
+
+ /*
+ * The buffer head should have an attached journal head at this
+ * point. However, if the data is corrupted and an indirect
+ * block pointed to itself, it would have been detached when
+ * the block was cleared. Check for this instead of OOPSing.
+ */
+ if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
+ ext4_handle_dirty_metadata(handle, inode, this_bh);
+ else
+ EXT4_ERROR_INODE(inode,
+ "circular indirect block detected at "
+ "block %llu",
+ (unsigned long long) this_bh->b_blocknr);
+ }
+}
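+
+/*
+ * For example (hypothetical numbers): if the pointer array holds
+ * blocks 500, 501, 502 and 600 (with holes elsewhere), the loop in
+ * ext4_free_data() issues two ext4_clear_blocks() calls, one for the
+ * run starting at block 500 with count == 3 and one for the single
+ * block 600.
+ */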
+
+/**
+ * ext4_free_branches - free an array of branches
+ * @handle: JBD handle for this transaction
+ * @inode: inode we are dealing with
+ * @parent_bh: the buffer_head which contains *@first and *@last
+ * @first: array of block numbers
+ * @last: pointer immediately past the end of array
+ * @depth: depth of the branches to free
+ *
+ * We are freeing all blocks referred from these branches (numbers are
+ * stored as little-endian 32-bit) and updating @inode->i_blocks
+ * appropriately.
+ */
+static void ext4_free_branches(handle_t *handle, struct inode *inode,
+ struct buffer_head *parent_bh,
+ __le32 *first, __le32 *last, int depth)
+{
+ ext4_fsblk_t nr;
+ __le32 *p;
+
+ if (ext4_handle_is_aborted(handle))
+ return;
+
+ if (depth--) {
+ struct buffer_head *bh;
+ int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+ p = last;
+ while (--p >= first) {
+ nr = le32_to_cpu(*p);
+ if (!nr)
+ continue; /* A hole */
+
+ if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
+ nr, 1)) {
+ EXT4_ERROR_INODE(inode,
+ "invalid indirect mapped "
+ "block %lu (level %d)",
+ (unsigned long) nr, depth);
+ break;
+ }
+
+ /* Go read the buffer for the next level down */
+ bh = sb_bread(inode->i_sb, nr);
+
+ /*
+ * A read failure? Report error and clear slot
+ * (should be rare).
+ */
+ if (!bh) {
+ EXT4_ERROR_INODE_BLOCK(inode, nr,
+ "Read failure");
+ continue;
+ }
+
+ /* This zaps the entire block. Bottom up. */
+ BUFFER_TRACE(bh, "free child branches");
+ ext4_free_branches(handle, inode, bh,
+ (__le32 *) bh->b_data,
+ (__le32 *) bh->b_data + addr_per_block,
+ depth);
+ brelse(bh);
+
+ /*
+ * Everything below this pointer has been
+ * released. Now let this top-of-subtree go.
+ *
+ * We want the freeing of this indirect block to be
+ * atomic in the journal with the updating of the
+ * bitmap block which owns it. So make some room in
+ * the journal.
+ *
+ * We zero the parent pointer *after* freeing its
+ * pointee in the bitmaps, so if extend_transaction()
+ * for some reason fails to put the bitmap changes and
+ * the release into the same transaction, recovery
+ * will merely complain about releasing a free block,
+ * rather than leaking blocks.
+ */
+ if (ext4_handle_is_aborted(handle))
+ return;
+ if (try_to_extend_transaction(handle, inode)) {
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_truncate_restart_trans(handle, inode,
+ ext4_blocks_for_truncate(inode));
+ }
+
+ /*
+ * The forget flag here is critical because if
+ * we are journaling (and not doing data
+ * journaling), we have to make sure a revoke
+ * record is written to prevent the journal
+ * replay from overwriting the (former)
+ * indirect block if it gets reallocated as a
+ * data block. This must happen in the same
+ * transaction where the data blocks are
+ * actually freed.
+ */
+ ext4_free_blocks(handle, inode, NULL, nr, 1,
+ EXT4_FREE_BLOCKS_METADATA|
+ EXT4_FREE_BLOCKS_FORGET);
+
+ if (parent_bh) {
+ /*
+ * The block which we have just freed is
+ * pointed to by an indirect block: journal it
+ */
+ BUFFER_TRACE(parent_bh, "get_write_access");
+ if (!ext4_journal_get_write_access(handle,
+ parent_bh)){
+ *p = 0;
+ BUFFER_TRACE(parent_bh,
+ "call ext4_handle_dirty_metadata");
+ ext4_handle_dirty_metadata(handle,
+ inode,
+ parent_bh);
+ }
+ }
+ }
+ } else {
+ /* We have reached the bottom of the tree. */
+ BUFFER_TRACE(parent_bh, "free data blocks");
+ ext4_free_data(handle, inode, parent_bh, first, last);
+ }
+}
+
+void ext4_ind_truncate(struct inode *inode)
+{
+ handle_t *handle;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ __le32 *i_data = ei->i_data;
+ int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+ struct address_space *mapping = inode->i_mapping;
+ ext4_lblk_t offsets[4];
+ Indirect chain[4];
+ Indirect *partial;
+ __le32 nr = 0;
+ int n = 0;
+ ext4_lblk_t last_block, max_block;
+ unsigned blocksize = inode->i_sb->s_blocksize;
+
+ handle = start_transaction(inode);
+ if (IS_ERR(handle))
+ return; /* AKPM: return what? */
+
+ last_block = (inode->i_size + blocksize-1)
+ >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+ max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+ >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+
+ if (inode->i_size & (blocksize - 1))
+ if (ext4_block_truncate_page(handle, mapping, inode->i_size))
+ goto out_stop;
+
+ if (last_block != max_block) {
+ n = ext4_block_to_path(inode, last_block, offsets, NULL);
+ if (n == 0)
+ goto out_stop; /* error */
+ }
+
+ /*
+ * OK. This truncate is going to happen. We add the inode to the
+ * orphan list, so that if this truncate spans multiple transactions,
+ * and we crash, we will resume the truncate when the filesystem
+ * recovers. It also marks the inode dirty, to catch the new size.
+ *
+ * Implication: the file must always be in a sane, consistent
+ * truncatable state while each transaction commits.
+ */
+ if (ext4_orphan_add(handle, inode))
+ goto out_stop;
+
+ /*
+ * From here we block out all ext4_get_block() callers who want to
+ * modify the block allocation tree.
+ */
+ down_write(&ei->i_data_sem);
+
+ ext4_discard_preallocations(inode);
+
+ /*
+ * The orphan list entry will now protect us from any crash which
+ * occurs before the truncate completes, so it is now safe to propagate
+ * the new, shorter inode size (held for now in i_size) into the
+ * on-disk inode. We do this via i_disksize, which is the value which
+ * ext4 *really* writes onto the disk inode.
+ */
+ ei->i_disksize = inode->i_size;
+
+ if (last_block == max_block) {
+ /*
+ * It is unnecessary to free any data blocks if last_block is
+ * equal to the indirect block limit.
+ */
+ goto out_unlock;
+ } else if (n == 1) { /* direct blocks */
+ ext4_free_data(handle, inode, NULL, i_data+offsets[0],
+ i_data + EXT4_NDIR_BLOCKS);
+ goto do_indirects;
+ }
+
+ partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+ /* Kill the top of shared branch (not detached) */
+ if (nr) {
+ if (partial == chain) {
+ /* Shared branch grows from the inode */
+ ext4_free_branches(handle, inode, NULL,
+ &nr, &nr+1, (chain+n-1) - partial);
+ *partial->p = 0;
+ /*
+ * We mark the inode dirty prior to restart,
+ * and prior to stop. No need for it here.
+ */
+ } else {
+ /* Shared branch grows from an indirect block */
+ BUFFER_TRACE(partial->bh, "get_write_access");
+ ext4_free_branches(handle, inode, partial->bh,
+ partial->p,
+ partial->p+1, (chain+n-1) - partial);
+ }
+ }
+ /* Clear the ends of indirect blocks on the shared branch */
+ while (partial > chain) {
+ ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
+ (__le32*)partial->bh->b_data+addr_per_block,
+ (chain+n-1) - partial);
+ BUFFER_TRACE(partial->bh, "call brelse");
+ brelse(partial->bh);
+ partial--;
+ }
+do_indirects:
+ /* Kill the remaining (whole) subtrees */
+ switch (offsets[0]) {
+ default:
+ nr = i_data[EXT4_IND_BLOCK];
+ if (nr) {
+ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+ i_data[EXT4_IND_BLOCK] = 0;
+ }
+ case EXT4_IND_BLOCK:
+ nr = i_data[EXT4_DIND_BLOCK];
+ if (nr) {
+ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+ i_data[EXT4_DIND_BLOCK] = 0;
+ }
+ case EXT4_DIND_BLOCK:
+ nr = i_data[EXT4_TIND_BLOCK];
+ if (nr) {
+ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+ i_data[EXT4_TIND_BLOCK] = 0;
+ }
+ case EXT4_TIND_BLOCK:
+ ;
+ }
+
+out_unlock:
+ up_write(&ei->i_data_sem);
+ inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
+
+ /*
+ * In a multi-transaction truncate, we only make the final transaction
+ * synchronous
+ */
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+out_stop:
+ /*
+ * If this was a simple ftruncate(), and the file will remain alive
+ * then we need to clear up the orphan record which we created above.
+ * However, if this was a real unlink then we were called by
+ * ext4_delete_inode(), and we allow that function to clean up the
+ * orphan info for us.
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+
+ ext4_journal_stop(handle);
+ trace_ext4_truncate_exit(inode);
+}
+
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3e5191f9f39..d47264cafee 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -12,10 +12,6 @@
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
- * Goal-directed block allocation by Stephen Tweedie
- * (sct@redhat.com), 1993, 1998
- * Big-endian to little-endian byte-swapping/bitmaps by
- * David S. Miller (davem@caip.rutgers.edu), 1995
* 64-bit file support on 64-bit platforms by Jakub Jelinek
* (jj@sunsite.ms.mff.cuni.cz)
*
@@ -47,6 +43,7 @@
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"
+#include "truncate.h"
#include <trace/events/ext4.h>
@@ -89,72 +86,6 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
}
/*
- * Work out how many blocks we need to proceed with the next chunk of a
- * truncate transaction.
- */
-static unsigned long blocks_for_truncate(struct inode *inode)
-{
- ext4_lblk_t needed;
-
- needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
-
- /* Give ourselves just enough room to cope with inodes in which
- * i_blocks is corrupt: we've seen disk corruptions in the past
- * which resulted in random data in an inode which looked enough
- * like a regular file for ext4 to try to delete it. Things
- * will go a bit crazy if that happens, but at least we should
- * try not to panic the whole kernel. */
- if (needed < 2)
- needed = 2;
-
- /* But we need to bound the transaction so we don't overflow the
- * journal. */
- if (needed > EXT4_MAX_TRANS_DATA)
- needed = EXT4_MAX_TRANS_DATA;
-
- return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
-}
-
-/*
- * Truncate transactions can be complex and absolutely huge. So we need to
- * be able to restart the transaction at a conventient checkpoint to make
- * sure we don't overflow the journal.
- *
- * start_transaction gets us a new handle for a truncate transaction,
- * and extend_transaction tries to extend the existing one a bit. If
- * extend fails, we need to propagate the failure up and restart the
- * transaction in the top-level truncate loop. --sct
- */
-static handle_t *start_transaction(struct inode *inode)
-{
- handle_t *result;
-
- result = ext4_journal_start(inode, blocks_for_truncate(inode));
- if (!IS_ERR(result))
- return result;
-
- ext4_std_error(inode->i_sb, PTR_ERR(result));
- return result;
-}
-
-/*
- * Try to extend this transaction for the purposes of truncation.
- *
- * Returns 0 if we managed to create more room. If we can't create more
- * room, and the transaction must be restarted we return 1.
- */
-static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
-{
- if (!ext4_handle_valid(handle))
- return 0;
- if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
- return 0;
- if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
- return 0;
- return 1;
-}
-
-/*
* Restart the transaction associated with *handle. This does a commit,
* so before we call here everything must be consistently dirtied against
* this transaction.
@@ -190,6 +121,33 @@ void ext4_evict_inode(struct inode *inode)
trace_ext4_evict_inode(inode);
if (inode->i_nlink) {
+ /*
+ * When journalling data dirty buffers are tracked only in the
+ * journal. So although mm thinks everything is clean and
+ * ready for reaping the inode might still have some pages to
+ * write in the running transaction or waiting to be
+ * checkpointed. Thus calling jbd2_journal_invalidatepage()
+ * (via truncate_inode_pages()) to discard these buffers can
+ * cause data loss. Also even if we did not discard these
+ * buffers, we would have no way to find them after the inode
+ * is reaped and thus user could see stale data if he tries to
+ * read them before the transaction is checkpointed. So be
+ * careful and force everything to disk here... We use
+ * ei->i_datasync_tid to store the newest transaction
+ * containing inode's data.
+ *
+ * Note that directories do not have this problem because they
+ * don't use page cache.
+ */
+ if (ext4_should_journal_data(inode) &&
+ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
+
+ jbd2_log_start_commit(journal, commit_tid);
+ jbd2_log_wait_commit(journal, commit_tid);
+ filemap_write_and_wait(&inode->i_data);
+ }
truncate_inode_pages(&inode->i_data, 0);
goto no_delete;
}
@@ -204,7 +162,7 @@ void ext4_evict_inode(struct inode *inode)
if (is_bad_inode(inode))
goto no_delete;
- handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
+ handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
/*
@@ -277,793 +235,6 @@ no_delete:
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}
-typedef struct {
- __le32 *p;
- __le32 key;
- struct buffer_head *bh;
-} Indirect;
-
-static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
-{
- p->key = *(p->p = v);
- p->bh = bh;
-}
-
-/**
- * ext4_block_to_path - parse the block number into array of offsets
- * @inode: inode in question (we are only interested in its superblock)
- * @i_block: block number to be parsed
- * @offsets: array to store the offsets in
- * @boundary: set this non-zero if the referred-to block is likely to be
- * followed (on disk) by an indirect block.
- *
- * To store the locations of file's data ext4 uses a data structure common
- * for UNIX filesystems - tree of pointers anchored in the inode, with
- * data blocks at leaves and indirect blocks in intermediate nodes.
- * This function translates the block number into path in that tree -
- * return value is the path length and @offsets[n] is the offset of
- * pointer to (n+1)th node in the nth one. If @block is out of range
- * (negative or too large) warning is printed and zero returned.
- *
- * Note: function doesn't find node addresses, so no IO is needed. All
- * we need to know is the capacity of indirect blocks (taken from the
- * inode->i_sb).
- */
-
-/*
- * Portability note: the last comparison (check that we fit into triple
- * indirect block) is spelled differently, because otherwise on an
- * architecture with 32-bit longs and 8Kb pages we might get into trouble
- * if our filesystem had 8Kb blocks. We might use long long, but that would
- * kill us on x86. Oh, well, at least the sign propagation does not matter -
- * i_block would have to be negative in the very beginning, so we would not
- * get there at all.
- */
-
-static int ext4_block_to_path(struct inode *inode,
- ext4_lblk_t i_block,
- ext4_lblk_t offsets[4], int *boundary)
-{
- int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
- int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
- const long direct_blocks = EXT4_NDIR_BLOCKS,
- indirect_blocks = ptrs,
- double_blocks = (1 << (ptrs_bits * 2));
- int n = 0;
- int final = 0;
-
- if (i_block < direct_blocks) {
- offsets[n++] = i_block;
- final = direct_blocks;
- } else if ((i_block -= direct_blocks) < indirect_blocks) {
- offsets[n++] = EXT4_IND_BLOCK;
- offsets[n++] = i_block;
- final = ptrs;
- } else if ((i_block -= indirect_blocks) < double_blocks) {
- offsets[n++] = EXT4_DIND_BLOCK;
- offsets[n++] = i_block >> ptrs_bits;
- offsets[n++] = i_block & (ptrs - 1);
- final = ptrs;
- } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
- offsets[n++] = EXT4_TIND_BLOCK;
- offsets[n++] = i_block >> (ptrs_bits * 2);
- offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
- offsets[n++] = i_block & (ptrs - 1);
- final = ptrs;
- } else {
- ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
- i_block + direct_blocks +
- indirect_blocks + double_blocks, inode->i_ino);
- }
- if (boundary)
- *boundary = final - 1 - (i_block & (ptrs - 1));
- return n;
-}
-
-static int __ext4_check_blockref(const char *function, unsigned int line,
- struct inode *inode,
- __le32 *p, unsigned int max)
-{
- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
- __le32 *bref = p;
- unsigned int blk;
-
- while (bref < p+max) {
- blk = le32_to_cpu(*bref++);
- if (blk &&
- unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
- blk, 1))) {
- es->s_last_error_block = cpu_to_le64(blk);
- ext4_error_inode(inode, function, line, blk,
- "invalid block");
- return -EIO;
- }
- }
- return 0;
-}
-
-
-#define ext4_check_indirect_blockref(inode, bh) \
- __ext4_check_blockref(__func__, __LINE__, inode, \
- (__le32 *)(bh)->b_data, \
- EXT4_ADDR_PER_BLOCK((inode)->i_sb))
-
-#define ext4_check_inode_blockref(inode) \
- __ext4_check_blockref(__func__, __LINE__, inode, \
- EXT4_I(inode)->i_data, \
- EXT4_NDIR_BLOCKS)
-
-/**
- * ext4_get_branch - read the chain of indirect blocks leading to data
- * @inode: inode in question
- * @depth: depth of the chain (1 - direct pointer, etc.)
- * @offsets: offsets of pointers in inode/indirect blocks
- * @chain: place to store the result
- * @err: here we store the error value
- *
- * Function fills the array of triples <key, p, bh> and returns %NULL
- * if everything went OK or the pointer to the last filled triple
- * (incomplete one) otherwise. Upon the return chain[i].key contains
- * the number of (i+1)-th block in the chain (as it is stored in memory,
- * i.e. little-endian 32-bit), chain[i].p contains the address of that
- * number (it points into struct inode for i==0 and into the bh->b_data
- * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
- * block for i>0 and NULL for i==0. In other words, it holds the block
- * numbers of the chain, addresses they were taken from (and where we can
- * verify that chain did not change) and buffer_heads hosting these
- * numbers.
- *
- * Function stops when it stumbles upon zero pointer (absent block)
- * (pointer to last triple returned, *@err == 0)
- * or when it gets an IO error reading an indirect block
- * (ditto, *@err == -EIO)
- * or when it reads all @depth-1 indirect blocks successfully and finds
- * the whole chain, all way to the data (returns %NULL, *err == 0).
- *
- * Need to be called with
- * down_read(&EXT4_I(inode)->i_data_sem)
- */
-static Indirect *ext4_get_branch(struct inode *inode, int depth,
- ext4_lblk_t *offsets,
- Indirect chain[4], int *err)
-{
- struct super_block *sb = inode->i_sb;
- Indirect *p = chain;
- struct buffer_head *bh;
-
- *err = 0;
- /* i_data is not going away, no lock needed */
- add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
- if (!p->key)
- goto no_block;
- while (--depth) {
- bh = sb_getblk(sb, le32_to_cpu(p->key));
- if (unlikely(!bh))
- goto failure;
-
- if (!bh_uptodate_or_lock(bh)) {
- if (bh_submit_read(bh) < 0) {
- put_bh(bh);
- goto failure;
- }
- /* validate block references */
- if (ext4_check_indirect_blockref(inode, bh)) {
- put_bh(bh);
- goto failure;
- }
- }
-
- add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
- /* Reader: end */
- if (!p->key)
- goto no_block;
- }
- return NULL;
-
-failure:
- *err = -EIO;
-no_block:
- return p;
-}
-
-/**
- * ext4_find_near - find a place for allocation with sufficient locality
- * @inode: owner
- * @ind: descriptor of indirect block.
- *
- * This function returns the preferred place for block allocation.
- * It is used when heuristic for sequential allocation fails.
- * Rules are:
- * + if there is a block to the left of our position - allocate near it.
- * + if pointer will live in indirect block - allocate near that block.
- * + if pointer will live in inode - allocate in the same
- * cylinder group.
- *
- * In the latter case we colour the starting block by the callers PID to
- * prevent it from clashing with concurrent allocations for a different inode
- * in the same block group. The PID is used here so that functionally related
- * files will be close-by on-disk.
- *
- * Caller must make sure that @ind is valid and will stay that way.
- */
-static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
- __le32 *p;
- ext4_fsblk_t bg_start;
- ext4_fsblk_t last_block;
- ext4_grpblk_t colour;
- ext4_group_t block_group;
- int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
-
- /* Try to find previous block */
- for (p = ind->p - 1; p >= start; p--) {
- if (*p)
- return le32_to_cpu(*p);
- }
-
- /* No such thing, so let's try location of indirect block */
- if (ind->bh)
- return ind->bh->b_blocknr;
-
- /*
- * It is going to be referred to from the inode itself? OK, just put it
- * into the same cylinder group then.
- */
- block_group = ei->i_block_group;
- if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
- block_group &= ~(flex_size-1);
- if (S_ISREG(inode->i_mode))
- block_group++;
- }
- bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
- last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
-
- /*
- * If we are doing delayed allocation, we don't need take
- * colour into account.
- */
- if (test_opt(inode->i_sb, DELALLOC))
- return bg_start;
-
- if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
- colour = (current->pid % 16) *
- (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
- else
- colour = (current->pid % 16) * ((last_block - bg_start) / 16);
- return bg_start + colour;
-}
-
-/**
- * ext4_find_goal - find a preferred place for allocation.
- * @inode: owner
- * @block: block we want
- * @partial: pointer to the last triple within a chain
- *
- * Normally this function find the preferred place for block allocation,
- * returns it.
- * Because this is only used for non-extent files, we limit the block nr
- * to 32 bits.
- */
-static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
- Indirect *partial)
-{
- ext4_fsblk_t goal;
-
- /*
- * XXX need to get goal block from mballoc's data structures
- */
-
- goal = ext4_find_near(inode, partial);
- goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
- return goal;
-}
-
-/**
- * ext4_blks_to_allocate - Look up the block map and count the number
- * of direct blocks need to be allocated for the given branch.
- *
- * @branch: chain of indirect blocks
- * @k: number of blocks need for indirect blocks
- * @blks: number of data blocks to be mapped.
- * @blocks_to_boundary: the offset in the indirect block
- *
- * return the total number of blocks to be allocate, including the
- * direct and indirect blocks.
- */
-static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
- int blocks_to_boundary)
-{
- unsigned int count = 0;
-
- /*
- * Simple case, [t,d]Indirect block(s) has not allocated yet
- * then it's clear blocks on that path have not allocated
- */
- if (k > 0) {
- /* right now we don't handle cross boundary allocation */
- if (blks < blocks_to_boundary + 1)
- count += blks;
- else
- count += blocks_to_boundary + 1;
- return count;
- }
-
- count++;
- while (count < blks && count <= blocks_to_boundary &&
- le32_to_cpu(*(branch[0].p + count)) == 0) {
- count++;
- }
- return count;
-}
-
-/**
- * ext4_alloc_blocks: multiple allocate blocks needed for a branch
- * @handle: handle for this transaction
- * @inode: inode which needs allocated blocks
- * @iblock: the logical block to start allocated at
- * @goal: preferred physical block of allocation
- * @indirect_blks: the number of blocks need to allocate for indirect
- * blocks
- * @blks: number of desired blocks
- * @new_blocks: on return it will store the new block numbers for
- * the indirect blocks(if needed) and the first direct block,
- * @err: on return it will store the error code
- *
- * This function will return the number of blocks allocated as
- * requested by the passed-in parameters.
- */
-static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
- ext4_lblk_t iblock, ext4_fsblk_t goal,
- int indirect_blks, int blks,
- ext4_fsblk_t new_blocks[4], int *err)
-{
- struct ext4_allocation_request ar;
- int target, i;
- unsigned long count = 0, blk_allocated = 0;
- int index = 0;
- ext4_fsblk_t current_block = 0;
- int ret = 0;
-
- /*
- * Here we try to allocate the requested multiple blocks at once,
- * on a best-effort basis.
- * To build a branch, we should allocate blocks for
- * the indirect blocks(if not allocated yet), and at least
- * the first direct block of this branch. That's the
- * minimum number of blocks need to allocate(required)
- */
- /* first we try to allocate the indirect blocks */
- target = indirect_blks;
- while (target > 0) {
- count = target;
- /* allocating blocks for indirect blocks and direct blocks */
- current_block = ext4_new_meta_blocks(handle, inode, goal,
- 0, &count, err);
- if (*err)
- goto failed_out;
-
- if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
- EXT4_ERROR_INODE(inode,
- "current_block %llu + count %lu > %d!",
- current_block, count,
- EXT4_MAX_BLOCK_FILE_PHYS);
- *err = -EIO;
- goto failed_out;
- }
-
- target -= count;
- /* allocate blocks for indirect blocks */
- while (index < indirect_blks && count) {
- new_blocks[index++] = current_block++;
- count--;
- }
- if (count > 0) {
- /*
- * save the new block number
- * for the first direct block
- */
- new_blocks[index] = current_block;
- printk(KERN_INFO "%s returned more blocks than "
- "requested\n", __func__);
- WARN_ON(1);
- break;
- }
- }
-
- target = blks - count ;
- blk_allocated = count;
- if (!target)
- goto allocated;
- /* Now allocate data blocks */
- memset(&ar, 0, sizeof(ar));
- ar.inode = inode;
- ar.goal = goal;
- ar.len = target;
- ar.logical = iblock;
- if (S_ISREG(inode->i_mode))
- /* enable in-core preallocation only for regular files */
- ar.flags = EXT4_MB_HINT_DATA;
-
- current_block = ext4_mb_new_blocks(handle, &ar, err);
- if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
- EXT4_ERROR_INODE(inode,
- "current_block %llu + ar.len %d > %d!",
- current_block, ar.len,
- EXT4_MAX_BLOCK_FILE_PHYS);
- *err = -EIO;
- goto failed_out;
- }
-
- if (*err && (target == blks)) {
- /*
- * if the allocation failed and we didn't allocate
- * any blocks before
- */
- goto failed_out;
- }
- if (!*err) {
- if (target == blks) {
- /*
- * save the new block number
- * for the first direct block
- */
- new_blocks[index] = current_block;
- }
- blk_allocated += ar.len;
- }
-allocated:
- /* total number of blocks allocated for direct blocks */
- ret = blk_allocated;
- *err = 0;
- return ret;
-failed_out:
- for (i = 0; i < index; i++)
- ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
- return ret;
-}
-
-/**
- * ext4_alloc_branch - allocate and set up a chain of blocks.
- * @handle: handle for this transaction
- * @inode: owner
- * @indirect_blks: number of allocated indirect blocks
- * @blks: number of allocated direct blocks
- * @goal: preferred place for allocation
- * @offsets: offsets (in the blocks) to store the pointers to next.
- * @branch: place to store the chain in.
- *
- * This function allocates blocks, zeroes out all but the last one,
- * links them into chain and (if we are synchronous) writes them to disk.
- * In other words, it prepares a branch that can be spliced onto the
- * inode. It stores the information about that chain in the branch[], in
- * the same format as ext4_get_branch() would do. We are calling it after
- * we had read the existing part of chain and partial points to the last
- * triple of that (one with zero ->key). Upon the exit we have the same
- * picture as after the successful ext4_get_block(), except that in one
- * place chain is disconnected - *branch->p is still zero (we did not
- * set the last link), but branch->key contains the number that should
- * be placed into *branch->p to fill that gap.
- *
- * If allocation fails we free all blocks we've allocated (and forget
- * their buffer_heads) and return the error value the from failed
- * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
- * as described above and return 0.
- */
-static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
- ext4_lblk_t iblock, int indirect_blks,
- int *blks, ext4_fsblk_t goal,
- ext4_lblk_t *offsets, Indirect *branch)
-{
- int blocksize = inode->i_sb->s_blocksize;
- int i, n = 0;
- int err = 0;
- struct buffer_head *bh;
- int num;
- ext4_fsblk_t new_blocks[4];
- ext4_fsblk_t current_block;
-
- num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
- *blks, new_blocks, &err);
- if (err)
- return err;
-
- branch[0].key = cpu_to_le32(new_blocks[0]);
- /*
- * metadata blocks and data blocks are allocated.
- */
- for (n = 1; n <= indirect_blks; n++) {
- /*
- * Get buffer_head for parent block, zero it out
- * and set the pointer to new one, then send
- * parent to disk.
- */
- bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
- if (unlikely(!bh)) {
- err = -EIO;
- goto failed;
- }
-
- branch[n].bh = bh;
- lock_buffer(bh);
- BUFFER_TRACE(bh, "call get_create_access");
- err = ext4_journal_get_create_access(handle, bh);
- if (err) {
- /* Don't brelse(bh) here; it's done in
- * ext4_journal_forget() below */
- unlock_buffer(bh);
- goto failed;
- }
-
- memset(bh->b_data, 0, blocksize);
- branch[n].p = (__le32 *) bh->b_data + offsets[n];
- branch[n].key = cpu_to_le32(new_blocks[n]);
- *branch[n].p = branch[n].key;
- if (n == indirect_blks) {
- current_block = new_blocks[n];
- /*
- * End of chain, update the last new metablock of
- * the chain to point to the new allocated
- * data blocks numbers
- */
- for (i = 1; i < num; i++)
- *(branch[n].p + i) = cpu_to_le32(++current_block);
- }
- BUFFER_TRACE(bh, "marking uptodate");
- set_buffer_uptodate(bh);
- unlock_buffer(bh);
-
- BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, inode, bh);
- if (err)
- goto failed;
- }
- *blks = num;
- return err;
-failed:
- /* Allocation failed, free what we already allocated */
- ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
- for (i = 1; i <= n ; i++) {
- /*
- * branch[i].bh is newly allocated, so there is no
- * need to revoke the block, which is why we don't
- * need to set EXT4_FREE_BLOCKS_METADATA.
- */
- ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
- EXT4_FREE_BLOCKS_FORGET);
- }
- for (i = n+1; i < indirect_blks; i++)
- ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
-
- ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
-
- return err;
-}
-
-/**
- * ext4_splice_branch - splice the allocated branch onto inode.
- * @handle: handle for this transaction
- * @inode: owner
- * @block: (logical) number of block we are adding
- * @chain: chain of indirect blocks (with a missing link - see
- * ext4_alloc_branch)
- * @where: location of missing link
- * @num: number of indirect blocks we are adding
- * @blks: number of direct blocks we are adding
- *
- * This function fills the missing link and does all housekeeping needed in
- * inode (->i_blocks, etc.). In case of success we end up with the full
- * chain to new block and return 0.
- */
-static int ext4_splice_branch(handle_t *handle, struct inode *inode,
- ext4_lblk_t block, Indirect *where, int num,
- int blks)
-{
- int i;
- int err = 0;
- ext4_fsblk_t current_block;
-
- /*
- * If we're splicing into a [td]indirect block (as opposed to the
- * inode) then we need to get write access to the [td]indirect block
- * before the splice.
- */
- if (where->bh) {
- BUFFER_TRACE(where->bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, where->bh);
- if (err)
- goto err_out;
- }
- /* That's it */
-
- *where->p = where->key;
-
- /*
- * Update the host buffer_head or inode to point to the just-allocated
- * direct blocks
- */
- if (num == 0 && blks > 1) {
- current_block = le32_to_cpu(where->key) + 1;
- for (i = 1; i < blks; i++)
- *(where->p + i) = cpu_to_le32(current_block++);
- }
-
- /* We are done with atomic stuff, now do the rest of housekeeping */
- /* had we spliced it onto indirect block? */
- if (where->bh) {
- /*
- * If we spliced it onto an indirect block, we haven't
- * altered the inode. Note however that if it is being spliced
- * onto an indirect block at the very end of the file (the
- * file is growing) then we *will* alter the inode to reflect
- * the new i_size. But that is not done here - it is done in
- * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
- */
- jbd_debug(5, "splicing indirect only\n");
- BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, inode, where->bh);
- if (err)
- goto err_out;
- } else {
- /*
- * OK, we spliced it into the inode itself on a direct block.
- */
- ext4_mark_inode_dirty(handle, inode);
- jbd_debug(5, "splicing direct\n");
- }
- return err;
-
-err_out:
- for (i = 1; i <= num; i++) {
- /*
- * branch[i].bh is newly allocated, so there is no
- * need to revoke the block, which is why we don't
- * need to set EXT4_FREE_BLOCKS_METADATA.
- */
- ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
- EXT4_FREE_BLOCKS_FORGET);
- }
- ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
- blks, 0);
-
- return err;
-}
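The comments on ext4_alloc_branch()/ext4_splice_branch() above describe a chain of {pointer, key, buffer_head} triples in which the last slot is deliberately left zero until the splice stores the prepared key. A minimal userspace sketch of that idea, with illustrative (non-kernel) names:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the on-disk little-endian block pointer. */
typedef uint32_t le32;

/* Each step remembers where the pointer lives (p) and what value
 * should eventually be stored there (key). */
struct ind_step {
        le32 *p;      /* slot inside the parent block (or inode) */
        le32 key;     /* block number the slot should point to   */
};

/* "Splicing" is just writing the prepared key into the still-zero slot;
 * until this store, the new branch is invisible to readers of the tree. */
static void splice_branch(struct ind_step *where)
{
        *where->p = where->key;
}

int main(void)
{
        le32 parent_slot = 0;                      /* the missing link */
        struct ind_step where = { &parent_slot, 1234 };

        splice_branch(&where);
        printf("slot now points to block %u\n", (unsigned) parent_slot);
        return 0;
}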
-
-/*
- * The ext4_ind_map_blocks() function handles non-extents inodes
- * (i.e., using the traditional indirect/double-indirect i_blocks
- * scheme) for ext4_map_blocks().
- *
- * Allocation strategy is simple: if we have to allocate something, we will
- * have to go the whole way to leaf. So let's do it before attaching anything
- * to tree, set linkage between the newborn blocks, write them if sync is
- * required, recheck the path, free and repeat if check fails, otherwise
- * set the last missing link (that will protect us from any truncate-generated
- * removals - all blocks on the path are immune now) and possibly force the
- * write on the parent block.
- * That has a nice additional property: no special recovery from the failed
- * allocations is needed - we simply release blocks and do not touch anything
- * reachable from inode.
- *
- * `handle' can be NULL if create == 0.
- *
- * return > 0, # of blocks mapped or allocated.
- * return = 0, if plain lookup failed.
- * return < 0, error case.
- *
- * The ext4_ind_get_blocks() function should be called with
- * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
- * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
- * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
- * blocks.
- */
-static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
- struct ext4_map_blocks *map,
- int flags)
-{
- int err = -EIO;
- ext4_lblk_t offsets[4];
- Indirect chain[4];
- Indirect *partial;
- ext4_fsblk_t goal;
- int indirect_blks;
- int blocks_to_boundary = 0;
- int depth;
- int count = 0;
- ext4_fsblk_t first_block = 0;
-
- trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
- J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
- J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
- depth = ext4_block_to_path(inode, map->m_lblk, offsets,
- &blocks_to_boundary);
-
- if (depth == 0)
- goto out;
-
- partial = ext4_get_branch(inode, depth, offsets, chain, &err);
-
- /* Simplest case - block found, no allocation needed */
- if (!partial) {
- first_block = le32_to_cpu(chain[depth - 1].key);
- count++;
- /*map more blocks*/
- while (count < map->m_len && count <= blocks_to_boundary) {
- ext4_fsblk_t blk;
-
- blk = le32_to_cpu(*(chain[depth-1].p + count));
-
- if (blk == first_block + count)
- count++;
- else
- break;
- }
- goto got_it;
- }
-
- /* Next simple case - plain lookup or failed read of indirect block */
- if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
- goto cleanup;
-
- /*
- * Okay, we need to do block allocation.
- */
- goal = ext4_find_goal(inode, map->m_lblk, partial);
-
- /* the number of blocks need to allocate for [d,t]indirect blocks */
- indirect_blks = (chain + depth) - partial - 1;
-
- /*
- * Next look up the indirect map to count the total number of
- * direct blocks to allocate for this branch.
- */
- count = ext4_blks_to_allocate(partial, indirect_blks,
- map->m_len, blocks_to_boundary);
- /*
- * Block out ext4_truncate while we alter the tree
- */
- err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
- &count, goal,
- offsets + (partial - chain), partial);
-
- /*
- * The ext4_splice_branch call will free and forget any buffers
- * on the new chain if there is a failure, but that risks using
- * up transaction credits, especially for bitmaps where the
- * credits cannot be returned. Can we handle this somehow? We
- * may need to return -EAGAIN upwards in the worst case. --sct
- */
- if (!err)
- err = ext4_splice_branch(handle, inode, map->m_lblk,
- partial, indirect_blks, count);
- if (err)
- goto cleanup;
-
- map->m_flags |= EXT4_MAP_NEW;
-
- ext4_update_inode_fsync_trans(handle, inode, 1);
-got_it:
- map->m_flags |= EXT4_MAP_MAPPED;
- map->m_pblk = le32_to_cpu(chain[depth-1].key);
- map->m_len = count;
- if (count > blocks_to_boundary)
- map->m_flags |= EXT4_MAP_BOUNDARY;
- err = count;
- /* Clean up and exit */
- partial = chain + depth - 1; /* the whole chain */
-cleanup:
- while (partial > chain) {
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
- partial--;
- }
-out:
- trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
- map->m_pblk, map->m_len, err);
- return err;
-}
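The return-value convention spelled out above (positive block count, zero for a plain lookup miss, negative errno) is what callers of the mapping helpers depend on. A hedged sketch of how such a tri-state result is typically consumed; map_blocks() here is a stand-in, not the kernel function:

#include <errno.h>
#include <stdio.h>

/* Hypothetical mapper following the same convention:
 * > 0 number of blocks mapped, 0 hole/lookup miss, < 0 -errno. */
static int map_blocks(unsigned long lblk, unsigned long len)
{
        if (len == 0)
                return -EINVAL;
        if (lblk >= 1000)
                return 0;               /* nothing mapped: a hole  */
        return len < 8 ? (int)len : 8;  /* mapped up to a boundary */
}

int main(void)
{
        int ret = map_blocks(12, 16);

        if (ret < 0)
                fprintf(stderr, "error: %d\n", ret);
        else if (ret == 0)
                printf("hole - caller must allocate or zero-fill\n");
        else
                printf("%d block(s) mapped\n", ret);
        return 0;
}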
-
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
@@ -1073,33 +244,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
/*
* Calculate the number of metadata blocks that need to be reserved
- * to allocate a new block at @lblock for a non-extent based file
- */
-static int ext4_indirect_calc_metadata_amount(struct inode *inode,
- sector_t lblock)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
- int blk_bits;
-
- if (lblock < EXT4_NDIR_BLOCKS)
- return 0;
-
- lblock -= EXT4_NDIR_BLOCKS;
-
- if (ei->i_da_metadata_calc_len &&
- (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
- ei->i_da_metadata_calc_len++;
- return 0;
- }
- ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
- ei->i_da_metadata_calc_len = 1;
- blk_bits = order_base_2(lblock);
- return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
-}
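The per-block estimate removed above is easy to check by hand: with 4 KiB blocks there are 1024 pointers per indirect block, i.e. 10 address bits per tree level. A small sketch of the same arithmetic, with the block-size constants assumed rather than read from a superblock, and without the cached last-dindirect-group optimisation:

#include <stdio.h>

#define ADDR_PER_BLOCK_BITS 10   /* 4 KiB block => 1024 pointers */
#define NDIR_BLOCKS 12

/* ceil(log2(x)) for x >= 1, i.e. order_base_2() */
static int order_base_2(unsigned long x)
{
        int order = 0;
        unsigned long v = 1;

        while (v < x) {
                v <<= 1;
                order++;
        }
        return order;
}

/* Worst-case metadata blocks needed to map logical block lblock. */
static int ind_calc_metadata_amount(unsigned long lblock)
{
        if (lblock < NDIR_BLOCKS)
                return 0;
        lblock -= NDIR_BLOCKS;
        return order_base_2(lblock) / ADDR_PER_BLOCK_BITS + 1;
}

int main(void)
{
        /* block 5012 -> 5000 past the direct area -> 13/10 + 1 = 2 */
        printf("%d\n", ind_calc_metadata_amount(5012));
        return 0;
}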
-
-/*
- * Calculate the number of metadata blocks that need to be reserved
* to allocate a block located at @lblock
*/
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
@@ -1107,7 +251,7 @@ static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return ext4_ext_calc_metadata_amount(inode, lblock);
- return ext4_indirect_calc_metadata_amount(inode, lblock);
+ return ext4_ind_calc_metadata_amount(inode, lblock);
}
/*
@@ -1589,16 +733,6 @@ static int do_journal_get_write_access(handle_t *handle,
return ret;
}
-/*
- * Truncate blocks that were not used by write. We have to truncate the
- * pagecache as well so that corresponding buffers get properly unmapped.
- */
-static void ext4_truncate_failed_write(struct inode *inode)
-{
- truncate_inode_pages(inode->i_mapping, inode->i_size);
- ext4_truncate(inode);
-}
-
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
@@ -1863,6 +997,7 @@ static int ext4_journalled_write_end(struct file *file,
if (new_i_size > inode->i_size)
i_size_write(inode, pos+copied);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+ EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
if (new_i_size > EXT4_I(inode)->i_disksize) {
ext4_update_i_disksize(inode, new_i_size);
ret2 = ext4_mark_inode_dirty(handle, inode);
@@ -2571,6 +1706,7 @@ static int __ext4_journalled_writepage(struct page *page,
write_end_fn);
if (ret == 0)
ret = err;
+ EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
@@ -3450,112 +2586,6 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
}
/*
- * O_DIRECT for ext3 (or indirect map) based files
- *
- * If the O_DIRECT write will extend the file then add this inode to the
- * orphan list. So recovery will truncate it back to the original size
- * if the machine crashes during the write.
- *
- * If the O_DIRECT write is instantiating holes inside i_size and the machine
- * crashes then stale disk data _may_ be exposed inside the file. But current
- * VFS code falls back into buffered path in that case so we are safe.
- */
-static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov, loff_t offset,
- unsigned long nr_segs)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct ext4_inode_info *ei = EXT4_I(inode);
- handle_t *handle;
- ssize_t ret;
- int orphan = 0;
- size_t count = iov_length(iov, nr_segs);
- int retries = 0;
-
- if (rw == WRITE) {
- loff_t final_size = offset + count;
-
- if (final_size > inode->i_size) {
- /* Credits for sb + inode write */
- handle = ext4_journal_start(inode, 2);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
- }
- ret = ext4_orphan_add(handle, inode);
- if (ret) {
- ext4_journal_stop(handle);
- goto out;
- }
- orphan = 1;
- ei->i_disksize = inode->i_size;
- ext4_journal_stop(handle);
- }
- }
-
-retry:
- if (rw == READ && ext4_should_dioread_nolock(inode))
- ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iov,
- offset, nr_segs,
- ext4_get_block, NULL, NULL, 0);
- else {
- ret = blockdev_direct_IO(rw, iocb, inode, iov,
- offset, nr_segs, ext4_get_block);
-
- if (unlikely((rw & WRITE) && ret < 0)) {
- loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iov, nr_segs);
-
- if (end > isize)
- ext4_truncate_failed_write(inode);
- }
- }
- if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry;
-
- if (orphan) {
- int err;
-
- /* Credits for sb + inode write */
- handle = ext4_journal_start(inode, 2);
- if (IS_ERR(handle)) {
- /* This is really bad luck. We've written the data
- * but cannot extend i_size. Bail out and pretend
- * the write failed... */
- ret = PTR_ERR(handle);
- if (inode->i_nlink)
- ext4_orphan_del(NULL, inode);
-
- goto out;
- }
- if (inode->i_nlink)
- ext4_orphan_del(handle, inode);
- if (ret > 0) {
- loff_t end = offset + ret;
- if (end > inode->i_size) {
- ei->i_disksize = end;
- i_size_write(inode, end);
- /*
- * We're going to return a positive `ret'
- * here due to non-zero-length I/O, so there's
- * no way of reporting error returns from
- * ext4_mark_inode_dirty() to userspace. So
- * ignore it.
- */
- ext4_mark_inode_dirty(handle, inode);
- }
- }
- err = ext4_journal_stop(handle);
- if (ret == 0)
- ret = err;
- }
-out:
- return ret;
-}
-
-/*
* ext4_get_block used when preparing for a DIO write or buffer write.
* We allocate an uninitialized extent if blocks haven't been allocated.
* The extent will be converted to initialized after the IO is complete.
@@ -4033,383 +3063,6 @@ unlock:
return err;
}
-/*
- * Probably it should be a library function... search for first non-zero word
- * or memcmp with zero_page, whatever is better for particular architecture.
- * Linus?
- */
-static inline int all_zeroes(__le32 *p, __le32 *q)
-{
- while (p < q)
- if (*p++)
- return 0;
- return 1;
-}
-
-/**
- * ext4_find_shared - find the indirect blocks for partial truncation.
- * @inode: inode in question
- * @depth: depth of the affected branch
- * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
- * @chain: place to store the pointers to partial indirect blocks
- * @top: place to the (detached) top of branch
- *
- * This is a helper function used by ext4_truncate().
- *
- * When we do truncate() we may have to clean the ends of several
- * indirect blocks but leave the blocks themselves alive. Block is
- * partially truncated if some data below the new i_size is referred
- * from it (and it is on the path to the first completely truncated
- * data block, indeed). We have to free the top of that path along
- * with everything to the right of the path. Since no allocation
- * past the truncation point is possible until ext4_truncate()
- * finishes, we may safely do the latter, but top of branch may
- * require special attention - pageout below the truncation point
- * might try to populate it.
- *
- * We atomically detach the top of branch from the tree, store the
- * block number of its root in *@top, pointers to buffer_heads of
- * partially truncated blocks - in @chain[].bh and pointers to
- * their last elements that should not be removed - in
- * @chain[].p. Return value is the pointer to last filled element
- * of @chain.
- *
- * The work left to the caller is the actual freeing of subtrees:
- * a) free the subtree starting from *@top
- * b) free the subtrees whose roots are stored in
- * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
- * c) free the subtrees growing from the inode past the @chain[0].
- * (no partially truncated stuff there). */
-
-static Indirect *ext4_find_shared(struct inode *inode, int depth,
- ext4_lblk_t offsets[4], Indirect chain[4],
- __le32 *top)
-{
- Indirect *partial, *p;
- int k, err;
-
- *top = 0;
- /* Make k index the deepest non-null offset + 1 */
- for (k = depth; k > 1 && !offsets[k-1]; k--)
- ;
- partial = ext4_get_branch(inode, k, offsets, chain, &err);
- /* Writer: pointers */
- if (!partial)
- partial = chain + k-1;
- /*
- * If the branch acquired continuation since we've looked at it -
- * fine, it should all survive and (new) top doesn't belong to us.
- */
- if (!partial->key && *partial->p)
- /* Writer: end */
- goto no_top;
- for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
- ;
- /*
- * OK, we've found the last block that must survive. The rest of our
- * branch should be detached before unlocking. However, if that rest
- * of branch is all ours and does not grow immediately from the inode
- * it's easier to cheat and just decrement partial->p.
- */
- if (p == chain + k - 1 && p > chain) {
- p->p--;
- } else {
- *top = *p->p;
- /* Nope, don't do this in ext4. Must leave the tree intact */
-#if 0
- *p->p = 0;
-#endif
- }
- /* Writer: end */
-
- while (partial > p) {
- brelse(partial->bh);
- partial--;
- }
-no_top:
- return partial;
-}
-
-/*
- * Zero a number of block pointers in either an inode or an indirect block.
- * If we restart the transaction we must again get write access to the
- * indirect block for further modification.
- *
- * We release `count' blocks on disk, but (last - first) may be greater
- * than `count' because there can be holes in there.
- *
- * Return 0 on success, 1 on invalid block range
- * and < 0 on fatal error.
- */
-static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
- struct buffer_head *bh,
- ext4_fsblk_t block_to_free,
- unsigned long count, __le32 *first,
- __le32 *last)
-{
- __le32 *p;
- int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
- int err;
-
- if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- flags |= EXT4_FREE_BLOCKS_METADATA;
-
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
- count)) {
- EXT4_ERROR_INODE(inode, "attempt to clear invalid "
- "blocks %llu len %lu",
- (unsigned long long) block_to_free, count);
- return 1;
- }
-
- if (try_to_extend_transaction(handle, inode)) {
- if (bh) {
- BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, inode, bh);
- if (unlikely(err))
- goto out_err;
- }
- err = ext4_mark_inode_dirty(handle, inode);
- if (unlikely(err))
- goto out_err;
- err = ext4_truncate_restart_trans(handle, inode,
- blocks_for_truncate(inode));
- if (unlikely(err))
- goto out_err;
- if (bh) {
- BUFFER_TRACE(bh, "retaking write access");
- err = ext4_journal_get_write_access(handle, bh);
- if (unlikely(err))
- goto out_err;
- }
- }
-
- for (p = first; p < last; p++)
- *p = 0;
-
- ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
- return 0;
-out_err:
- ext4_std_error(inode->i_sb, err);
- return err;
-}
-
-/**
- * ext4_free_data - free a list of data blocks
- * @handle: handle for this transaction
- * @inode: inode we are dealing with
- * @this_bh: indirect buffer_head which contains *@first and *@last
- * @first: array of block numbers
- * @last: points immediately past the end of array
- *
- * We are freeing all blocks referred from that array (numbers are stored as
- * little-endian 32-bit) and updating @inode->i_blocks appropriately.
- *
- * We accumulate contiguous runs of blocks to free. Conveniently, if these
- * blocks are contiguous then releasing them at one time will only affect one
- * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
- * actually use a lot of journal space.
- *
- * @this_bh will be %NULL if @first and @last point into the inode's direct
- * block pointers.
- */
-static void ext4_free_data(handle_t *handle, struct inode *inode,
- struct buffer_head *this_bh,
- __le32 *first, __le32 *last)
-{
- ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
- unsigned long count = 0; /* Number of blocks in the run */
- __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
- corresponding to
- block_to_free */
- ext4_fsblk_t nr; /* Current block # */
- __le32 *p; /* Pointer into inode/ind
- for current block */
- int err = 0;
-
- if (this_bh) { /* For indirect block */
- BUFFER_TRACE(this_bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, this_bh);
- /* Important: if we can't update the indirect pointers
- * to the blocks, we can't free them. */
- if (err)
- return;
- }
-
- for (p = first; p < last; p++) {
- nr = le32_to_cpu(*p);
- if (nr) {
- /* accumulate blocks to free if they're contiguous */
- if (count == 0) {
- block_to_free = nr;
- block_to_free_p = p;
- count = 1;
- } else if (nr == block_to_free + count) {
- count++;
- } else {
- err = ext4_clear_blocks(handle, inode, this_bh,
- block_to_free, count,
- block_to_free_p, p);
- if (err)
- break;
- block_to_free = nr;
- block_to_free_p = p;
- count = 1;
- }
- }
- }
-
- if (!err && count > 0)
- err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
- count, block_to_free_p, p);
- if (err < 0)
- /* fatal error */
- return;
-
- if (this_bh) {
- BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
-
- /*
- * The buffer head should have an attached journal head at this
- * point. However, if the data is corrupted and an indirect
- * block pointed to itself, it would have been detached when
- * the block was cleared. Check for this instead of OOPSing.
- */
- if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
- ext4_handle_dirty_metadata(handle, inode, this_bh);
- else
- EXT4_ERROR_INODE(inode,
- "circular indirect block detected at "
- "block %llu",
- (unsigned long long) this_bh->b_blocknr);
- }
-}
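The run accumulation described above (collect contiguous block numbers and release each run with a single call so that only one or two bitmap blocks are dirtied) is a generic pattern; a compact stand-alone sketch with a dummy free callback:

#include <stdio.h>

static void free_run(unsigned long start, unsigned long count)
{
        printf("free %lu block(s) starting at %lu\n", count, start);
}

/* Coalesce contiguous entries of blks[] into as few free_run() calls
 * as possible; zero entries are holes and are skipped. */
static void free_list(const unsigned long *blks, int n)
{
        unsigned long start = 0, count = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (!blks[i])
                        continue;              /* a hole */
                if (count && blks[i] == start + count) {
                        count++;               /* extends current run */
                } else {
                        if (count)
                                free_run(start, count);
                        start = blks[i];
                        count = 1;
                }
        }
        if (count)
                free_run(start, count);
}

int main(void)
{
        unsigned long blks[] = { 100, 101, 102, 0, 200, 201, 50 };
        free_list(blks, 7);
        return 0;
}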
-
-/**
- * ext4_free_branches - free an array of branches
- * @handle: JBD handle for this transaction
- * @inode: inode we are dealing with
- * @parent_bh: the buffer_head which contains *@first and *@last
- * @first: array of block numbers
- * @last: pointer immediately past the end of array
- * @depth: depth of the branches to free
- *
- * We are freeing all blocks referred from these branches (numbers are
- * stored as little-endian 32-bit) and updating @inode->i_blocks
- * appropriately.
- */
-static void ext4_free_branches(handle_t *handle, struct inode *inode,
- struct buffer_head *parent_bh,
- __le32 *first, __le32 *last, int depth)
-{
- ext4_fsblk_t nr;
- __le32 *p;
-
- if (ext4_handle_is_aborted(handle))
- return;
-
- if (depth--) {
- struct buffer_head *bh;
- int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
- p = last;
- while (--p >= first) {
- nr = le32_to_cpu(*p);
- if (!nr)
- continue; /* A hole */
-
- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
- nr, 1)) {
- EXT4_ERROR_INODE(inode,
- "invalid indirect mapped "
- "block %lu (level %d)",
- (unsigned long) nr, depth);
- break;
- }
-
- /* Go read the buffer for the next level down */
- bh = sb_bread(inode->i_sb, nr);
-
- /*
- * A read failure? Report error and clear slot
- * (should be rare).
- */
- if (!bh) {
- EXT4_ERROR_INODE_BLOCK(inode, nr,
- "Read failure");
- continue;
- }
-
- /* This zaps the entire block. Bottom up. */
- BUFFER_TRACE(bh, "free child branches");
- ext4_free_branches(handle, inode, bh,
- (__le32 *) bh->b_data,
- (__le32 *) bh->b_data + addr_per_block,
- depth);
- brelse(bh);
-
- /*
- * Everything below this pointer has been
- * released. Now let this top-of-subtree go.
- *
- * We want the freeing of this indirect block to be
- * atomic in the journal with the updating of the
- * bitmap block which owns it. So make some room in
- * the journal.
- *
- * We zero the parent pointer *after* freeing its
- * pointee in the bitmaps, so if extend_transaction()
- * for some reason fails to put the bitmap changes and
- * the release into the same transaction, recovery
- * will merely complain about releasing a free block,
- * rather than leaking blocks.
- */
- if (ext4_handle_is_aborted(handle))
- return;
- if (try_to_extend_transaction(handle, inode)) {
- ext4_mark_inode_dirty(handle, inode);
- ext4_truncate_restart_trans(handle, inode,
- blocks_for_truncate(inode));
- }
-
- /*
- * The forget flag here is critical because if
- * we are journaling (and not doing data
- * journaling), we have to make sure a revoke
- * record is written to prevent the journal
- * replay from overwriting the (former)
- * indirect block if it gets reallocated as a
- * data block. This must happen in the same
- * transaction where the data blocks are
- * actually freed.
- */
- ext4_free_blocks(handle, inode, NULL, nr, 1,
- EXT4_FREE_BLOCKS_METADATA|
- EXT4_FREE_BLOCKS_FORGET);
-
- if (parent_bh) {
- /*
- * The block which we have just freed is
- * pointed to by an indirect block: journal it
- */
- BUFFER_TRACE(parent_bh, "get_write_access");
- if (!ext4_journal_get_write_access(handle,
- parent_bh)){
- *p = 0;
- BUFFER_TRACE(parent_bh,
- "call ext4_handle_dirty_metadata");
- ext4_handle_dirty_metadata(handle,
- inode,
- parent_bh);
- }
- }
- }
- } else {
- /* We have reached the bottom of the tree. */
- BUFFER_TRACE(parent_bh, "free data blocks");
- ext4_free_data(handle, inode, parent_bh, first, last);
- }
-}
-
int ext4_can_truncate(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
@@ -4476,19 +3129,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
*/
void ext4_truncate(struct inode *inode)
{
- handle_t *handle;
- struct ext4_inode_info *ei = EXT4_I(inode);
- __le32 *i_data = ei->i_data;
- int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
- struct address_space *mapping = inode->i_mapping;
- ext4_lblk_t offsets[4];
- Indirect chain[4];
- Indirect *partial;
- __le32 nr = 0;
- int n = 0;
- ext4_lblk_t last_block, max_block;
- unsigned blocksize = inode->i_sb->s_blocksize;
-
trace_ext4_truncate_enter(inode);
if (!ext4_can_truncate(inode))
@@ -4499,149 +3139,11 @@ void ext4_truncate(struct inode *inode)
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ext4_ext_truncate(inode);
- trace_ext4_truncate_exit(inode);
- return;
- }
-
- handle = start_transaction(inode);
- if (IS_ERR(handle))
- return; /* AKPM: return what? */
-
- last_block = (inode->i_size + blocksize-1)
- >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
- max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
- >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
-
- if (inode->i_size & (blocksize - 1))
- if (ext4_block_truncate_page(handle, mapping, inode->i_size))
- goto out_stop;
-
- if (last_block != max_block) {
- n = ext4_block_to_path(inode, last_block, offsets, NULL);
- if (n == 0)
- goto out_stop; /* error */
- }
-
- /*
- * OK. This truncate is going to happen. We add the inode to the
- * orphan list, so that if this truncate spans multiple transactions,
- * and we crash, we will resume the truncate when the filesystem
- * recovers. It also marks the inode dirty, to catch the new size.
- *
- * Implication: the file must always be in a sane, consistent
- * truncatable state while each transaction commits.
- */
- if (ext4_orphan_add(handle, inode))
- goto out_stop;
-
- /*
- * From here we block out all ext4_get_block() callers who want to
- * modify the block allocation tree.
- */
- down_write(&ei->i_data_sem);
-
- ext4_discard_preallocations(inode);
-
- /*
- * The orphan list entry will now protect us from any crash which
- * occurs before the truncate completes, so it is now safe to propagate
- * the new, shorter inode size (held for now in i_size) into the
- * on-disk inode. We do this via i_disksize, which is the value which
- * ext4 *really* writes onto the disk inode.
- */
- ei->i_disksize = inode->i_size;
-
- if (last_block == max_block) {
- /*
- * It is unnecessary to free any data blocks if last_block is
- * equal to the indirect block limit.
- */
- goto out_unlock;
- } else if (n == 1) { /* direct blocks */
- ext4_free_data(handle, inode, NULL, i_data+offsets[0],
- i_data + EXT4_NDIR_BLOCKS);
- goto do_indirects;
- }
-
- partial = ext4_find_shared(inode, n, offsets, chain, &nr);
- /* Kill the top of shared branch (not detached) */
- if (nr) {
- if (partial == chain) {
- /* Shared branch grows from the inode */
- ext4_free_branches(handle, inode, NULL,
- &nr, &nr+1, (chain+n-1) - partial);
- *partial->p = 0;
- /*
- * We mark the inode dirty prior to restart,
- * and prior to stop. No need for it here.
- */
- } else {
- /* Shared branch grows from an indirect block */
- BUFFER_TRACE(partial->bh, "get_write_access");
- ext4_free_branches(handle, inode, partial->bh,
- partial->p,
- partial->p+1, (chain+n-1) - partial);
- }
- }
- /* Clear the ends of indirect blocks on the shared branch */
- while (partial > chain) {
- ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
- (__le32*)partial->bh->b_data+addr_per_block,
- (chain+n-1) - partial);
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
- partial--;
- }
-do_indirects:
- /* Kill the remaining (whole) subtrees */
- switch (offsets[0]) {
- default:
- nr = i_data[EXT4_IND_BLOCK];
- if (nr) {
- ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
- i_data[EXT4_IND_BLOCK] = 0;
- }
- case EXT4_IND_BLOCK:
- nr = i_data[EXT4_DIND_BLOCK];
- if (nr) {
- ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
- i_data[EXT4_DIND_BLOCK] = 0;
- }
- case EXT4_DIND_BLOCK:
- nr = i_data[EXT4_TIND_BLOCK];
- if (nr) {
- ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
- i_data[EXT4_TIND_BLOCK] = 0;
- }
- case EXT4_TIND_BLOCK:
- ;
- }
-
-out_unlock:
- up_write(&ei->i_data_sem);
- inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
-
- /*
- * In a multi-transaction truncate, we only make the final transaction
- * synchronous
- */
- if (IS_SYNC(inode))
- ext4_handle_sync(handle);
-out_stop:
- /*
- * If this was a simple ftruncate(), and the file will remain alive
- * then we need to clear up the orphan record which we created above.
- * However, if this was a real unlink then we were called by
- * ext4_delete_inode(), and we allow that function to clean up the
- * orphan info for us.
- */
- if (inode->i_nlink)
- ext4_orphan_del(handle, inode);
+ else
+ ext4_ind_truncate(inode);
- ext4_journal_stop(handle);
trace_ext4_truncate_exit(inode);
}
@@ -5012,7 +3514,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
(S_ISLNK(inode->i_mode) &&
!ext4_inode_is_fast_symlink(inode))) {
/* Validate block references which are part of inode */
- ret = ext4_check_inode_blockref(inode);
+ ret = ext4_ind_check_inode(inode);
}
if (ret)
goto bad_inode;
@@ -5459,34 +3961,10 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
return 0;
}
-static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
- int chunk)
-{
- int indirects;
-
- /* if nrblocks are contiguous */
- if (chunk) {
- /*
- * With N contiguous data blocks, we need at most
- * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
- * 2 dindirect blocks, and 1 tindirect block
- */
- return DIV_ROUND_UP(nrblocks,
- EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
- }
- /*
- * if nrblocks are not contiguous, worse case, each block touch
- * a indirect block, and each indirect block touch a double indirect
- * block, plus a triple indirect block
- */
- indirects = nrblocks * 2 + 1;
- return indirects;
-}
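As a sanity check of the removed credit estimate: with 1024 pointers per indirect block, 2048 contiguous blocks need ceil(2048/1024) + 4 = 6 metadata blocks, while 16 scattered blocks are charged 16 * 2 + 1 = 33. A tiny sketch of the same formula, with the block-size constant assumed:

#include <stdio.h>

#define ADDR_PER_BLOCK 1024   /* 4 KiB block size assumed */

static int ind_trans_blocks(int nrblocks, int contiguous)
{
        if (contiguous)
                /* N/1024 indirect blocks (rounded up) + 1 indirect +
                 * 2 dindirect + 1 tindirect, as the comment above says */
                return (nrblocks + ADDR_PER_BLOCK - 1) / ADDR_PER_BLOCK + 4;
        /* scattered: each block may touch its own indirect and dindirect
         * block, plus one shared tindirect */
        return nrblocks * 2 + 1;
}

int main(void)
{
        printf("%d %d\n", ind_trans_blocks(2048, 1), ind_trans_blocks(16, 0));
        /* prints "6 33" */
        return 0;
}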
-
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
+ return ext4_ind_trans_blocks(inode, nrblocks, chunk);
return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 808c554e773..f18bfe37aff 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -202,8 +202,9 @@ setversion_out:
struct super_block *sb = inode->i_sb;
int err, err2=0;
- if (!capable(CAP_SYS_RESOURCE))
- return -EPERM;
+ err = ext4_resize_begin(sb);
+ if (err)
+ return err;
if (get_user(n_blocks_count, (__u32 __user *)arg))
return -EFAULT;
@@ -221,6 +222,7 @@ setversion_out:
if (err == 0)
err = err2;
mnt_drop_write(filp->f_path.mnt);
+ ext4_resize_end(sb);
return err;
}
@@ -271,8 +273,9 @@ mext_out:
struct super_block *sb = inode->i_sb;
int err, err2=0;
- if (!capable(CAP_SYS_RESOURCE))
- return -EPERM;
+ err = ext4_resize_begin(sb);
+ if (err)
+ return err;
if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
sizeof(input)))
@@ -291,6 +294,7 @@ mext_out:
if (err == 0)
err = err2;
mnt_drop_write(filp->f_path.mnt);
+ ext4_resize_end(sb);
return err;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 6ed859d5685..17a5a57c415 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -75,8 +75,8 @@
*
* The inode preallocation space is used looking at the _logical_ start
* block. If only the logical file block falls within the range of prealloc
- * space we will consume the particular prealloc space. This make sure that
- * that the we have contiguous physical blocks representing the file blocks
+ * space we will consume the particular prealloc space. This makes sure that
+ * we have contiguous physical blocks representing the file blocks
*
* The important thing to be noted in case of inode prealloc space is that
* we don't modify the values associated to inode prealloc space except
@@ -84,7 +84,7 @@
*
* If we are not able to find blocks in the inode prealloc space and if we
* have the group allocation flag set then we look at the locality group
- * prealloc space. These are per CPU prealloc list repreasented as
+ * prealloc space. These are per CPU prealloc list represented as
*
* ext4_sb_info.s_locality_groups[smp_processor_id()]
*
@@ -128,12 +128,13 @@
* we are doing a group prealloc we try to normalize the request to
* sbi->s_mb_group_prealloc. Default value of s_mb_group_prealloc is
* 512 blocks. This can be tuned via
- * /sys/fs/ext4/<partition/mb_group_prealloc. The value is represented in
+ * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
* terms of number of blocks. If we have mounted the file system with -O
* stripe=<value> option the group prealloc request is normalized to the
- * stripe value (sbi->s_stripe)
+ * smallest multiple of the stripe value (sbi->s_stripe) which is
+ * greater than the default mb_group_prealloc.
*
- * The regular allocator(using the buddy cache) supports few tunables.
+ * The regular allocator (using the buddy cache) supports a few tunables.
*
* /sys/fs/ext4/<partition>/mb_min_to_scan
* /sys/fs/ext4/<partition>/mb_max_to_scan
@@ -152,7 +153,7 @@
* best extent in the found extents. Searching for the blocks starts with
* the group specified as the goal value in allocation context via
* ac_g_ex. Each group is first checked based on the criteria whether it
- * can used for allocation. ext4_mb_good_group explains how the groups are
+ * can be used for allocation. ext4_mb_good_group explains how the groups are
* checked.
*
* Both the prealloc space are getting populated as above. So for the first
@@ -492,10 +493,11 @@ static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
b2 = (unsigned char *) bitmap;
for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
if (b1[i] != b2[i]) {
- printk(KERN_ERR "corruption in group %u "
- "at byte %u(%u): %x in copy != %x "
- "on disk/prealloc\n",
- e4b->bd_group, i, i * 8, b1[i], b2[i]);
+ ext4_msg(e4b->bd_sb, KERN_ERR,
+ "corruption in group %u "
+ "at byte %u(%u): %x in copy != %x "
+ "on disk/prealloc",
+ e4b->bd_group, i, i * 8, b1[i], b2[i]);
BUG();
}
}
@@ -1125,7 +1127,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
grp = ext4_get_group_info(sb, group);
e4b->bd_blkbits = sb->s_blocksize_bits;
- e4b->bd_info = ext4_get_group_info(sb, group);
+ e4b->bd_info = grp;
e4b->bd_sb = sb;
e4b->bd_group = group;
e4b->bd_buddy_page = NULL;
@@ -1281,7 +1283,7 @@ static void mb_clear_bits(void *bm, int cur, int len)
}
}
-static void mb_set_bits(void *bm, int cur, int len)
+void ext4_set_bits(void *bm, int cur, int len)
{
__u32 *addr;
@@ -1510,7 +1512,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
}
mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
- mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
+ ext4_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
mb_check_buddy(e4b);
return ret;
@@ -2223,8 +2225,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
EXT4_DESC_PER_BLOCK_BITS(sb);
meta_group_info = kmalloc(metalen, GFP_KERNEL);
if (meta_group_info == NULL) {
- printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
- "buddy group\n");
+ ext4_msg(sb, KERN_ERR, "EXT4-fs: can't allocate mem "
+ "for a buddy group");
goto exit_meta_group_info;
}
sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
@@ -2237,7 +2239,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
if (meta_group_info[i] == NULL) {
- printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
+ ext4_msg(sb, KERN_ERR, "EXT4-fs: can't allocate buddy mem");
goto exit_group_info;
}
memset(meta_group_info[i], 0, kmem_cache_size(cachep));
@@ -2279,8 +2281,10 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
exit_group_info:
/* If a meta_group_info table has been allocated, release it now */
- if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
+ if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
+ sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
+ }
exit_meta_group_info:
return -ENOMEM;
} /* ext4_mb_add_groupinfo */
@@ -2328,23 +2332,26 @@ static int ext4_mb_init_backend(struct super_block *sb)
/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
* kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
* So a two level scheme suffices for now. */
- sbi->s_group_info = kzalloc(array_size, GFP_KERNEL);
+ sbi->s_group_info = ext4_kvzalloc(array_size, GFP_KERNEL);
if (sbi->s_group_info == NULL) {
- printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
+ ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
return -ENOMEM;
}
sbi->s_buddy_cache = new_inode(sb);
if (sbi->s_buddy_cache == NULL) {
- printk(KERN_ERR "EXT4-fs: can't get new inode\n");
+ ext4_msg(sb, KERN_ERR, "can't get new inode");
goto err_freesgi;
}
- sbi->s_buddy_cache->i_ino = get_next_ino();
+ /* To avoid potentially colliding with a valid on-disk inode number,
+ * use EXT4_BAD_INO for the buddy cache inode number. This inode is
+ * not in the inode hash, so it should never be found by iget(), but
+ * this will avoid confusion if it ever shows up during debugging. */
+ sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
for (i = 0; i < ngroups; i++) {
desc = ext4_get_group_desc(sb, i, NULL);
if (desc == NULL) {
- printk(KERN_ERR
- "EXT4-fs: can't read descriptor %u\n", i);
+ ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
goto err_freebuddy;
}
if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
@@ -2362,7 +2369,7 @@ err_freebuddy:
kfree(sbi->s_group_info[i]);
iput(sbi->s_buddy_cache);
err_freesgi:
- kfree(sbi->s_group_info);
+ ext4_kvfree(sbi->s_group_info);
return -ENOMEM;
}
@@ -2404,14 +2411,15 @@ static int ext4_groupinfo_create_slab(size_t size)
slab_size, 0, SLAB_RECLAIM_ACCOUNT,
NULL);
+ ext4_groupinfo_caches[cache_index] = cachep;
+
mutex_unlock(&ext4_grpinfo_slab_create_mutex);
if (!cachep) {
- printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n");
+ printk(KERN_EMERG
+ "EXT4-fs: no memory for groupinfo slab cache\n");
return -ENOMEM;
}
- ext4_groupinfo_caches[cache_index] = cachep;
-
return 0;
}
@@ -2457,12 +2465,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
i++;
} while (i <= sb->s_blocksize_bits + 1);
- /* init file for buddy data */
- ret = ext4_mb_init_backend(sb);
- if (ret != 0) {
- goto out;
- }
-
spin_lock_init(&sbi->s_md_lock);
spin_lock_init(&sbi->s_bal_lock);
@@ -2472,6 +2474,18 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
+ /*
+ * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
+ * to the lowest multiple of s_stripe which is bigger than
+ * the s_mb_group_prealloc as determined above. We want
+ * the preallocation size to be an exact multiple of the
+ * RAID stripe size so that preallocations don't fragment
+ * the stripes.
+ */
+ if (sbi->s_stripe > 1) {
+ sbi->s_mb_group_prealloc = roundup(
+ sbi->s_mb_group_prealloc, sbi->s_stripe);
+ }
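The roundup added here keeps the group preallocation an exact multiple of the RAID stripe so preallocations do not straddle stripes. A quick check of the arithmetic with illustrative values (default prealloc of 512 blocks, -o stripe=48):

#include <stdio.h>

/* Same semantics as the kernel's roundup(): smallest multiple of
 * 'step' that is >= x (step > 0). */
static unsigned int roundup_to(unsigned int x, unsigned int step)
{
        return ((x + step - 1) / step) * step;
}

int main(void)
{
        printf("%u\n", roundup_to(512, 48));   /* prints 528 */
        return 0;
}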
sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
if (sbi->s_locality_groups == NULL) {
@@ -2487,6 +2501,12 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
spin_lock_init(&lg->lg_prealloc_lock);
}
+ /* init file for buddy data */
+ ret = ext4_mb_init_backend(sb);
+ if (ret != 0) {
+ goto out;
+ }
+
if (sbi->s_proc)
proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
&ext4_mb_seq_groups_fops, sb);
@@ -2544,32 +2564,32 @@ int ext4_mb_release(struct super_block *sb)
EXT4_DESC_PER_BLOCK_BITS(sb);
for (i = 0; i < num_meta_group_infos; i++)
kfree(sbi->s_group_info[i]);
- kfree(sbi->s_group_info);
+ ext4_kvfree(sbi->s_group_info);
}
kfree(sbi->s_mb_offsets);
kfree(sbi->s_mb_maxs);
if (sbi->s_buddy_cache)
iput(sbi->s_buddy_cache);
if (sbi->s_mb_stats) {
- printk(KERN_INFO
- "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u blocks %u reqs (%u success)",
atomic_read(&sbi->s_bal_allocated),
atomic_read(&sbi->s_bal_reqs),
atomic_read(&sbi->s_bal_success));
- printk(KERN_INFO
- "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
- "%u 2^N hits, %u breaks, %u lost\n",
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u extents scanned, %u goal hits, "
+ "%u 2^N hits, %u breaks, %u lost",
atomic_read(&sbi->s_bal_ex_scanned),
atomic_read(&sbi->s_bal_goals),
atomic_read(&sbi->s_bal_2orders),
atomic_read(&sbi->s_bal_breaks),
atomic_read(&sbi->s_mb_lost_chunks));
- printk(KERN_INFO
- "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
- sbi->s_mb_buddies_generated++,
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %lu generated and it took %Lu",
+ sbi->s_mb_buddies_generated,
sbi->s_mb_generation_time);
- printk(KERN_INFO
- "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u preallocated, %u discarded",
atomic_read(&sbi->s_mb_preallocated),
atomic_read(&sbi->s_mb_discarded));
}
@@ -2628,6 +2648,15 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
rb_erase(&entry->node, &(db->bb_free_root));
mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
+ /*
+ * Clear the trimmed flag for the group so that the next
+ * ext4_trim_fs can trim it.
+ * If the volume is mounted with -o discard, online discard
+ * is supported and the free blocks will be trimmed online.
+ */
+ if (!test_opt(sb, DISCARD))
+ EXT4_MB_GRP_CLEAR_TRIMMED(db);
+
if (!db->bb_free_root.rb_node) {
/* No more items in the per group rb tree
* balance refcounts from ext4_mb_free_metadata()
@@ -2771,8 +2800,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
* We leak some of the blocks here.
*/
ext4_lock_group(sb, ac->ac_b_ex.fe_group);
- mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
- ac->ac_b_ex.fe_len);
+ ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+ ac->ac_b_ex.fe_len);
ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!err)
@@ -2790,7 +2819,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
}
}
#endif
- mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
+ ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+ ac->ac_b_ex.fe_len);
if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_blks_set(sb, gdp,
@@ -2830,8 +2860,9 @@ out_err:
/*
* here we normalize request for locality group
- * Group request are normalized to s_strip size if we set the same via mount
- * option. If not we set it to s_mb_group_prealloc which can be configured via
+ * Group requests are normalized to s_mb_group_prealloc, which is
+ * derived from the stripe size (s_stripe) if that mount option is set.
+ * s_mb_group_prealloc can be configured via
* /sys/fs/ext4/<partition>/mb_group_prealloc
*
* XXX: should we try to preallocate more than the group has now?
@@ -2842,10 +2873,7 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
struct ext4_locality_group *lg = ac->ac_lg;
BUG_ON(lg == NULL);
- if (EXT4_SB(sb)->s_stripe)
- ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
- else
- ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
+ ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
mb_debug(1, "#%u: goal %u blocks for locality group\n",
current->pid, ac->ac_g_ex.fe_len);
}
@@ -3001,9 +3029,10 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
if (start + size <= ac->ac_o_ex.fe_logical &&
start > ac->ac_o_ex.fe_logical) {
- printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
- (unsigned long) start, (unsigned long) size,
- (unsigned long) ac->ac_o_ex.fe_logical);
+ ext4_msg(ac->ac_sb, KERN_ERR,
+ "start %lu, size %lu, fe_logical %lu",
+ (unsigned long) start, (unsigned long) size,
+ (unsigned long) ac->ac_o_ex.fe_logical);
}
BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
start > ac->ac_o_ex.fe_logical);
@@ -3262,7 +3291,7 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
while (n) {
entry = rb_entry(n, struct ext4_free_data, node);
- mb_set_bits(bitmap, entry->start_blk, entry->count);
+ ext4_set_bits(bitmap, entry->start_blk, entry->count);
n = rb_next(n);
}
return;
@@ -3304,7 +3333,7 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
if (unlikely(len == 0))
continue;
BUG_ON(groupnr != group);
- mb_set_bits(bitmap, start, len);
+ ext4_set_bits(bitmap, start, len);
preallocated += len;
count++;
}
@@ -3584,10 +3613,11 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
bit = next + 1;
}
if (free != pa->pa_free) {
- printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
- pa, (unsigned long) pa->pa_lstart,
- (unsigned long) pa->pa_pstart,
- (unsigned long) pa->pa_len);
+ ext4_msg(e4b->bd_sb, KERN_CRIT,
+ "pa %p: logic %lu, phys. %lu, len %lu",
+ pa, (unsigned long) pa->pa_lstart,
+ (unsigned long) pa->pa_pstart,
+ (unsigned long) pa->pa_len);
ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
free, pa->pa_free);
/*
@@ -3775,7 +3805,8 @@ repeat:
* use preallocation while we're discarding it */
spin_unlock(&pa->pa_lock);
spin_unlock(&ei->i_prealloc_lock);
- printk(KERN_ERR "uh-oh! used pa while discarding\n");
+ ext4_msg(sb, KERN_ERR,
+ "uh-oh! used pa while discarding");
WARN_ON(1);
schedule_timeout_uninterruptible(HZ);
goto repeat;
@@ -3852,12 +3883,13 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
return;
- printk(KERN_ERR "EXT4-fs: Can't allocate:"
- " Allocation context details:\n");
- printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
+ ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: Can't allocate:"
+ " Allocation context details:");
+ ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: status %d flags %d",
ac->ac_status, ac->ac_flags);
- printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
- "best %lu/%lu/%lu@%lu cr %d\n",
+ ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: orig %lu/%lu/%lu@%lu, "
+ "goal %lu/%lu/%lu@%lu, "
+ "best %lu/%lu/%lu@%lu cr %d",
(unsigned long)ac->ac_o_ex.fe_group,
(unsigned long)ac->ac_o_ex.fe_start,
(unsigned long)ac->ac_o_ex.fe_len,
@@ -3871,9 +3903,9 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
(unsigned long)ac->ac_b_ex.fe_len,
(unsigned long)ac->ac_b_ex.fe_logical,
(int)ac->ac_criteria);
- printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
- ac->ac_found);
- printk(KERN_ERR "EXT4-fs: groups: \n");
+ ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: %lu scanned, %d found",
+ ac->ac_ex_scanned, ac->ac_found);
+ ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: groups: ");
ngroups = ext4_get_groups_count(sb);
for (i = 0; i < ngroups; i++) {
struct ext4_group_info *grp = ext4_get_group_info(sb, i);
@@ -4637,7 +4669,7 @@ do_more:
}
ext4_mark_super_dirty(sb);
error_return:
- if (freed)
+ if (freed && !(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
dquot_free_block(inode, freed);
brelse(bitmap_bh);
ext4_std_error(sb, err);
@@ -4645,7 +4677,7 @@ error_return:
}
/**
- * ext4_add_groupblocks() -- Add given blocks to an existing group
+ * ext4_group_add_blocks() -- Add given blocks to an existing group
* @handle: handle to this transaction
* @sb: super block
* @block: start physical block to add to the block group
@@ -4653,7 +4685,7 @@ error_return:
*
* This marks the blocks as free in the bitmap and buddy.
*/
-void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_fsblk_t block, unsigned long count)
{
struct buffer_head *bitmap_bh = NULL;
@@ -4666,25 +4698,35 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
struct ext4_buddy e4b;
int err = 0, ret, blk_free_count;
ext4_grpblk_t blocks_freed;
- struct ext4_group_info *grp;
ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
+ if (count == 0)
+ return 0;
+
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
- grp = ext4_get_group_info(sb, block_group);
/*
* Check to see if we are freeing blocks across a group
* boundary.
*/
- if (bit + count > EXT4_BLOCKS_PER_GROUP(sb))
+ if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
+ ext4_warning(sb, "too much blocks added to group %u\n",
+ block_group);
+ err = -EINVAL;
goto error_return;
+ }
bitmap_bh = ext4_read_block_bitmap(sb, block_group);
- if (!bitmap_bh)
+ if (!bitmap_bh) {
+ err = -EIO;
goto error_return;
+ }
+
desc = ext4_get_group_desc(sb, block_group, &gd_bh);
- if (!desc)
+ if (!desc) {
+ err = -EIO;
goto error_return;
+ }
if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
in_range(ext4_inode_bitmap(sb, desc), block, count) ||
@@ -4694,6 +4736,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
ext4_error(sb, "Adding blocks in system zones - "
"Block = %llu, count = %lu",
block, count);
+ err = -EINVAL;
goto error_return;
}
@@ -4762,7 +4805,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
error_return:
brelse(bitmap_bh);
ext4_std_error(sb, err);
- return;
+ return err;
}
/**
@@ -4782,6 +4825,8 @@ static void ext4_trim_extent(struct super_block *sb, int start, int count,
{
struct ext4_free_extent ex;
+ trace_ext4_trim_extent(sb, group, start, count);
+
assert_spin_locked(ext4_group_lock_ptr(sb, group));
ex.fe_start = start;
@@ -4802,7 +4847,7 @@ static void ext4_trim_extent(struct super_block *sb, int start, int count,
/**
* ext4_trim_all_free -- function to trim all free space in alloc. group
* @sb: super block for file system
- * @e4b: ext4 buddy
+ * @group: group to be trimmed
* @start: first group block to examine
* @max: last group block to examine
* @minblocks: minimum extent block count
@@ -4823,10 +4868,12 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_grpblk_t minblocks)
{
void *bitmap;
- ext4_grpblk_t next, count = 0;
+ ext4_grpblk_t next, count = 0, free_count = 0;
struct ext4_buddy e4b;
int ret;
+ trace_ext4_trim_all_free(sb, group, start, max);
+
ret = ext4_mb_load_buddy(sb, group, &e4b);
if (ret) {
ext4_error(sb, "Error in loading buddy "
@@ -4836,6 +4883,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
bitmap = e4b.bd_bitmap;
ext4_lock_group(sb, group);
+ if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
+ minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
+ goto out;
+
start = (e4b.bd_info->bb_first_free > start) ?
e4b.bd_info->bb_first_free : start;
@@ -4850,6 +4901,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
next - start, group, &e4b);
count += next - start;
}
+ free_count += next - start;
start = next + 1;
if (fatal_signal_pending(current)) {
@@ -4863,9 +4915,13 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_lock_group(sb, group);
}
- if ((e4b.bd_info->bb_free - count) < minblocks)
+ if ((e4b.bd_info->bb_free - free_count) < minblocks)
break;
}
+
+ if (!ret)
+ EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
+out:
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
@@ -4904,6 +4960,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
return -EINVAL;
+ if (start + len <= first_data_blk)
+ goto out;
if (start < first_data_blk) {
len -= first_data_blk - start;
start = first_data_blk;
@@ -4952,5 +5010,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
}
range->len = trimmed * sb->s_blocksize;
+ if (!ret)
+ atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
+
+out:
return ret;
}
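The WAS_TRIMMED/last_trim_minblks machinery added in this file lets repeat FITRIM calls skip a group only when it was already trimmed with a minimum-extent threshold no larger than the current one. A compact userspace sketch of that check, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct group_state {
        bool was_trimmed;
};

static unsigned long last_trim_minblks;

/* Returns true when the group still needs to be scanned for this
 * request; records the trimmed flag as a side effect. */
static bool group_needs_trim(struct group_state *grp, unsigned long minblks)
{
        if (grp->was_trimmed && minblks >= last_trim_minblks)
                return false;          /* nothing new could be trimmed */
        grp->was_trimmed = true;       /* the free extents would be trimmed here */
        return true;
}

int main(void)
{
        struct group_state g = { .was_trimmed = false };

        printf("%d\n", group_needs_trim(&g, 64));  /* 1: first pass trims    */
        last_trim_minblks = 64;
        printf("%d\n", group_needs_trim(&g, 128)); /* 0: nothing finer asked */
        printf("%d\n", group_needs_trim(&g, 16));  /* 1: finer request again */
        return 0;
}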
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 20b5e7bfebd..9d4a636b546 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -187,7 +187,6 @@ struct ext4_allocation_context {
__u16 ac_flags; /* allocation hints */
__u8 ac_status;
__u8 ac_criteria;
- __u8 ac_repeats;
__u8 ac_2order; /* if request is to allocate 2^N blocks and
* N > 0, the field stores N, otherwise 0 */
__u8 ac_op; /* operation, for history only */
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 8c9babac43d..565a154e22d 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -289,7 +289,7 @@ static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_ent
while (len--) printk("%c", *name++);
ext4fs_dirhash(de->name, de->name_len, &h);
printk(":%x.%u ", h.hash,
- ((char *) de - base));
+ (unsigned) ((char *) de - base));
}
space += EXT4_DIR_REC_LEN(de->name_len);
names++;
@@ -1013,7 +1013,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
*err = -ENOENT;
errout:
- dxtrace(printk(KERN_DEBUG "%s not found\n", name));
+ dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
dx_release (frames);
return NULL;
}
@@ -1985,18 +1985,11 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
if (!list_empty(&EXT4_I(inode)->i_orphan))
goto out_unlock;
- /* Orphan handling is only valid for files with data blocks
- * being truncated, or files being unlinked. */
-
- /* @@@ FIXME: Observation from aviro:
- * I think I can trigger J_ASSERT in ext4_orphan_add(). We block
- * here (on s_orphan_lock), so race with ext4_link() which might bump
- * ->i_nlink. For, say it, character device. Not a regular file,
- * not a directory, not a symlink and ->i_nlink > 0.
- *
- * tytso, 4/25/2009: I'm not sure how that could happen;
- * shouldn't the fs core protect us from these sort of
- * unlink()/link() races?
+ /*
+ * Orphan handling is only valid for files with data blocks
+ * being truncated, or files being unlinked. Note that we either
+ * hold i_mutex, or the inode cannot be referenced from outside,
+ * so i_nlink should not be bumped due to a race
*/
J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 7bb8f76d470..430c401d089 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -285,11 +285,7 @@ static int io_submit_init(struct ext4_io_submit *io,
io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_end)
return -ENOMEM;
- do {
- bio = bio_alloc(GFP_NOIO, nvecs);
- nvecs >>= 1;
- } while (bio == NULL);
-
+ bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
bio->bi_private = io->io_end = io_end;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 80bbc9c60c2..707d3f16f7c 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -16,6 +16,35 @@
#include "ext4_jbd2.h"
+int ext4_resize_begin(struct super_block *sb)
+{
+ int ret = 0;
+
+ if (!capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ /*
+ * We are not allowed to do online resizing on a filesystem mounted
+ * with errors, because it can destroy the filesystem easily.
+ */
+ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+ ext4_warning(sb, "There are errors in the filesystem, "
+ "so online resizing is not allowed\n");
+ return -EPERM;
+ }
+
+ if (test_and_set_bit_lock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags))
+ ret = -EBUSY;
+
+ return ret;
+}
+
+void ext4_resize_end(struct super_block *sb)
+{
+ clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags);
+ smp_mb__after_clear_bit();
+}
+
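The new begin/end helpers serialize resizers with a single flag bit taken via test_and_set_bit_lock() instead of a mutex, returning -EBUSY to a second caller. Roughly the same pattern in portable C11 atomics, as an illustration only:

#include <stdatomic.h>
#include <stdio.h>
#include <errno.h>

static atomic_flag resizing = ATOMIC_FLAG_INIT;

/* Only one resizer may be active; a second caller gets -EBUSY
 * instead of blocking, mirroring the begin/end helpers above. */
static int resize_begin(void)
{
        if (atomic_flag_test_and_set_explicit(&resizing, memory_order_acquire))
                return -EBUSY;
        return 0;
}

static void resize_end(void)
{
        atomic_flag_clear_explicit(&resizing, memory_order_release);
}

int main(void)
{
        printf("%d\n", resize_begin());   /* 0               */
        printf("%d\n", resize_begin());   /* -EBUSY (busy)   */
        resize_end();
        printf("%d\n", resize_begin());   /* 0 again         */
        return 0;
}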
#define outside(b, first, last) ((b) < (first) || (b) >= (last))
#define inside(b, first, last) ((b) >= (first) && (b) < (last))
@@ -118,10 +147,8 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
brelse(bh);
bh = ERR_PTR(err);
} else {
- lock_buffer(bh);
memset(bh->b_data, 0, sb->s_blocksize);
set_buffer_uptodate(bh);
- unlock_buffer(bh);
}
return bh;
@@ -132,8 +159,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
* If that fails, restart the transaction & regain write access for the
* buffer head which is used for block_bitmap modifications.
*/
-static int extend_or_restart_transaction(handle_t *handle, int thresh,
- struct buffer_head *bh)
+static int extend_or_restart_transaction(handle_t *handle, int thresh)
{
int err;
@@ -144,9 +170,8 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh,
if (err < 0)
return err;
if (err) {
- if ((err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
- return err;
- if ((err = ext4_journal_get_write_access(handle, bh)))
+ err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
+ if (err)
return err;
}
@@ -181,21 +206,7 @@ static int setup_new_group_blocks(struct super_block *sb,
if (IS_ERR(handle))
return PTR_ERR(handle);
- mutex_lock(&sbi->s_resize_lock);
- if (input->group != sbi->s_groups_count) {
- err = -EBUSY;
- goto exit_journal;
- }
-
- if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
- err = PTR_ERR(bh);
- goto exit_journal;
- }
-
- if (ext4_bg_has_super(sb, input->group)) {
- ext4_debug("mark backup superblock %#04llx (+0)\n", start);
- ext4_set_bit(0, bh->b_data);
- }
+ BUG_ON(input->group != sbi->s_groups_count);
/* Copy all of the GDT blocks into the backup in this group */
for (i = 0, bit = 1, block = start + 1;
@@ -203,29 +214,26 @@ static int setup_new_group_blocks(struct super_block *sb,
struct buffer_head *gdb;
ext4_debug("update backup group %#04llx (+%d)\n", block, bit);
-
- if ((err = extend_or_restart_transaction(handle, 1, bh)))
- goto exit_bh;
+ err = extend_or_restart_transaction(handle, 1);
+ if (err)
+ goto exit_journal;
gdb = sb_getblk(sb, block);
if (!gdb) {
err = -EIO;
- goto exit_bh;
+ goto exit_journal;
}
if ((err = ext4_journal_get_write_access(handle, gdb))) {
brelse(gdb);
- goto exit_bh;
+ goto exit_journal;
}
- lock_buffer(gdb);
memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
set_buffer_uptodate(gdb);
- unlock_buffer(gdb);
err = ext4_handle_dirty_metadata(handle, NULL, gdb);
if (unlikely(err)) {
brelse(gdb);
- goto exit_bh;
+ goto exit_journal;
}
- ext4_set_bit(bit, bh->b_data);
brelse(gdb);
}
@@ -235,9 +243,22 @@ static int setup_new_group_blocks(struct super_block *sb,
err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
GFP_NOFS);
if (err)
- goto exit_bh;
- for (i = 0, bit = gdblocks + 1; i < reserved_gdb; i++, bit++)
- ext4_set_bit(bit, bh->b_data);
+ goto exit_journal;
+
+ err = extend_or_restart_transaction(handle, 2);
+ if (err)
+ goto exit_journal;
+
+ bh = bclean(handle, sb, input->block_bitmap);
+ if (IS_ERR(bh)) {
+ err = PTR_ERR(bh);
+ goto exit_journal;
+ }
+
+ if (ext4_bg_has_super(sb, input->group)) {
+ ext4_debug("mark backup group tables %#04llx (+0)\n", start);
+ ext4_set_bits(bh->b_data, 0, gdblocks + reserved_gdb + 1);
+ }
ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
input->block_bitmap - start);
@@ -253,12 +274,9 @@ static int setup_new_group_blocks(struct super_block *sb,
err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
if (err)
goto exit_bh;
- for (i = 0, bit = input->inode_table - start;
- i < sbi->s_itb_per_group; i++, bit++)
- ext4_set_bit(bit, bh->b_data);
+ ext4_set_bits(bh->b_data, input->inode_table - start,
+ sbi->s_itb_per_group);
- if ((err = extend_or_restart_transaction(handle, 2, bh)))
- goto exit_bh;
ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
bh->b_data);
@@ -285,7 +303,6 @@ exit_bh:
brelse(bh);
exit_journal:
- mutex_unlock(&sbi->s_resize_lock);
if ((err2 = ext4_journal_stop(handle)) && !err)
err = err2;
@@ -377,15 +394,15 @@ static int verify_reserved_gdb(struct super_block *sb,
* fail once we start modifying the data on disk, because JBD has no rollback.
*/
static int add_new_gdb(handle_t *handle, struct inode *inode,
- struct ext4_new_group_data *input,
- struct buffer_head **primary)
+ ext4_group_t group)
{
struct super_block *sb = inode->i_sb;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
- unsigned long gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
+ unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
struct buffer_head **o_group_desc, **n_group_desc;
struct buffer_head *dind;
+ struct buffer_head *gdb_bh;
int gdbackups;
struct ext4_iloc iloc;
__le32 *data;
@@ -408,11 +425,12 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
return -EPERM;
}
- *primary = sb_bread(sb, gdblock);
- if (!*primary)
+ gdb_bh = sb_bread(sb, gdblock);
+ if (!gdb_bh)
return -EIO;
- if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
+ gdbackups = verify_reserved_gdb(sb, gdb_bh);
+ if (gdbackups < 0) {
err = gdbackups;
goto exit_bh;
}
@@ -427,7 +445,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
data = (__le32 *)dind->b_data;
if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
ext4_warning(sb, "new group %u GDT block %llu not reserved",
- input->group, gdblock);
+ group, gdblock);
err = -EINVAL;
goto exit_dind;
}
@@ -436,7 +454,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
if (unlikely(err))
goto exit_dind;
- err = ext4_journal_get_write_access(handle, *primary);
+ err = ext4_journal_get_write_access(handle, gdb_bh);
if (unlikely(err))
goto exit_sbh;
@@ -449,12 +467,13 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
if (unlikely(err))
goto exit_dindj;
- n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
- GFP_NOFS);
+ n_group_desc = ext4_kvmalloc((gdb_num + 1) *
+ sizeof(struct buffer_head *),
+ GFP_NOFS);
if (!n_group_desc) {
err = -ENOMEM;
- ext4_warning(sb,
- "not enough memory for %lu groups", gdb_num + 1);
+ ext4_warning(sb, "not enough memory for %lu groups",
+ gdb_num + 1);
goto exit_inode;
}
@@ -475,8 +494,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
}
inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
ext4_mark_iloc_dirty(handle, inode, &iloc);
- memset((*primary)->b_data, 0, sb->s_blocksize);
- err = ext4_handle_dirty_metadata(handle, NULL, *primary);
+ memset(gdb_bh->b_data, 0, sb->s_blocksize);
+ err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
if (unlikely(err)) {
ext4_std_error(sb, err);
goto exit_inode;
@@ -486,10 +505,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
o_group_desc = EXT4_SB(sb)->s_group_desc;
memcpy(n_group_desc, o_group_desc,
EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
- n_group_desc[gdb_num] = *primary;
+ n_group_desc[gdb_num] = gdb_bh;
EXT4_SB(sb)->s_group_desc = n_group_desc;
EXT4_SB(sb)->s_gdb_count++;
- kfree(o_group_desc);
+ ext4_kvfree(o_group_desc);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
@@ -499,6 +518,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
return err;
exit_inode:
+ ext4_kvfree(n_group_desc);
/* ext4_handle_release_buffer(handle, iloc.bh); */
brelse(iloc.bh);
exit_dindj:
@@ -508,7 +528,7 @@ exit_sbh:
exit_dind:
brelse(dind);
exit_bh:
- brelse(*primary);
+ brelse(gdb_bh);
ext4_debug("leaving with error %d\n", err);
return err;
@@ -528,7 +548,7 @@ exit_bh:
* backup GDT blocks are stored in their reserved primary GDT block.
*/
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
- struct ext4_new_group_data *input)
+ ext4_group_t group)
{
struct super_block *sb = inode->i_sb;
int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
@@ -599,7 +619,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
* Finally we can add each of the reserved backup GDT blocks from
* the new group to its reserved primary GDT block.
*/
- blk = input->group * EXT4_BLOCKS_PER_GROUP(sb);
+ blk = group * EXT4_BLOCKS_PER_GROUP(sb);
for (i = 0; i < reserved_gdb; i++) {
int err2;
data = (__le32 *)primary[i]->b_data;
@@ -799,13 +819,6 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
goto exit_put;
}
- mutex_lock(&sbi->s_resize_lock);
- if (input->group != sbi->s_groups_count) {
- ext4_warning(sb, "multiple resizers run on filesystem!");
- err = -EBUSY;
- goto exit_journal;
- }
-
if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
goto exit_journal;
@@ -820,16 +833,25 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
if ((err = ext4_journal_get_write_access(handle, primary)))
goto exit_journal;
- if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
- (err = reserve_backup_gdb(handle, inode, input)))
+ if (reserved_gdb && ext4_bg_num_gdb(sb, input->group)) {
+ err = reserve_backup_gdb(handle, inode, input->group);
+ if (err)
+ goto exit_journal;
+ }
+ } else {
+ /*
+ * Note that we can access new group descriptor block safely
+ * only if add_new_gdb() succeeds.
+ */
+ err = add_new_gdb(handle, inode, input->group);
+ if (err)
goto exit_journal;
- } else if ((err = add_new_gdb(handle, inode, input, &primary)))
- goto exit_journal;
+ primary = sbi->s_group_desc[gdb_num];
+ }
/*
* OK, now we've set up the new group. Time to make it active.
*
- * We do not lock all allocations via s_resize_lock
* so we have to be safe wrt. concurrent accesses the group
* data. So we need to be careful to set all of the relevant
* group descriptor data etc. *before* we enable the group.
@@ -886,13 +908,9 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
*
* The precise rules we use are:
*
- * * Writers of s_groups_count *must* hold s_resize_lock
- * AND
* * Writers must perform a smp_wmb() after updating all dependent
* data and before modifying the groups count
*
- * * Readers must hold s_resize_lock over the access
- * OR
* * Readers must perform an smp_rmb() after reading the groups count
* and before reading any dependent data.
*
@@ -937,10 +955,9 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
ext4_handle_dirty_super(handle, sb);
exit_journal:
- mutex_unlock(&sbi->s_resize_lock);
if ((err2 = ext4_journal_stop(handle)) && !err)
err = err2;
- if (!err) {
+ if (!err && primary) {
update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
sizeof(struct ext4_super_block));
update_backups(sb, primary->b_blocknr, primary->b_data,
@@ -969,16 +986,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
ext4_grpblk_t add;
struct buffer_head *bh;
handle_t *handle;
- int err;
+ int err, err2;
ext4_group_t group;
- /* We don't need to worry about locking wrt other resizers just
- * yet: we're going to revalidate es->s_blocks_count after
- * taking the s_resize_lock below. */
o_blocks_count = ext4_blocks_count(es);
if (test_opt(sb, DEBUG))
- printk(KERN_DEBUG "EXT4-fs: extending last group from %llu uto %llu blocks\n",
+ printk(KERN_DEBUG "EXT4-fs: extending last group from %llu to %llu blocks\n",
o_blocks_count, n_blocks_count);
if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
@@ -995,7 +1009,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
if (n_blocks_count < o_blocks_count) {
ext4_warning(sb, "can't shrink FS - resize aborted");
- return -EBUSY;
+ return -EINVAL;
}
/* Handle the remaining blocks in the last group only. */
@@ -1038,32 +1052,25 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
goto exit_put;
}
- mutex_lock(&EXT4_SB(sb)->s_resize_lock);
- if (o_blocks_count != ext4_blocks_count(es)) {
- ext4_warning(sb, "multiple resizers run on filesystem!");
- mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
- ext4_journal_stop(handle);
- err = -EBUSY;
- goto exit_put;
- }
-
if ((err = ext4_journal_get_write_access(handle,
EXT4_SB(sb)->s_sbh))) {
ext4_warning(sb, "error %d on journal write access", err);
- mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
ext4_journal_stop(handle);
goto exit_put;
}
ext4_blocks_count_set(es, o_blocks_count + add);
- mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
o_blocks_count + add);
/* We add the blocks to the bitmap and set the group need init bit */
- ext4_add_groupblocks(handle, sb, o_blocks_count, add);
+ err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
ext4_handle_dirty_super(handle, sb);
ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
o_blocks_count + add);
- if ((err = ext4_journal_stop(handle)))
+ err2 = ext4_journal_stop(handle);
+ if (!err && err2)
+ err = err2;
+
+ if (err)
goto exit_put;
if (test_opt(sb, DEBUG))
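The resize.c hunks above drop s_resize_lock entirely: exclusion between concurrent resizers is now a single EXT4_RESIZING bit taken in ext4_resize_begin() and released in ext4_resize_end(). A minimal caller sketch follows, assuming the resize ioctl paths (which live in fs/ext4/ioctl.c and are not part of this diff) bracket the operation roughly like this; error handling is trimmed and the function name is illustrative:

/* Hypothetical caller sketch, not taken from this patch. */
static long do_online_group_add(struct super_block *sb,
                                struct ext4_new_group_data *input)
{
        int err;

        err = ext4_resize_begin(sb);    /* -EPERM or -EBUSY if refused */
        if (err)
                return err;

        err = ext4_group_add(sb, input);

        ext4_resize_end(sb);            /* clear_bit_unlock + barrier */
        return err;
}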
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9ea71aa864b..4687fea0c00 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -110,6 +110,35 @@ static struct file_system_type ext3_fs_type = {
#define IS_EXT3_SB(sb) (0)
#endif
+void *ext4_kvmalloc(size_t size, gfp_t flags)
+{
+ void *ret;
+
+ ret = kmalloc(size, flags);
+ if (!ret)
+ ret = __vmalloc(size, flags, PAGE_KERNEL);
+ return ret;
+}
+
+void *ext4_kvzalloc(size_t size, gfp_t flags)
+{
+ void *ret;
+
+ ret = kzalloc(size, flags);
+ if (!ret)
+ ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
+ return ret;
+}
+
+void ext4_kvfree(void *ptr)
+{
+ if (is_vmalloc_addr(ptr))
+ vfree(ptr);
+ else
+ kfree(ptr);
+
+}
+
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
struct ext4_group_desc *bg)
{
@@ -269,6 +298,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
journal_t *journal;
handle_t *handle;
+ trace_ext4_journal_start(sb, nblocks, _RET_IP_);
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
@@ -789,11 +819,8 @@ static void ext4_put_super(struct super_block *sb)
for (i = 0; i < sbi->s_gdb_count; i++)
brelse(sbi->s_group_desc[i]);
- kfree(sbi->s_group_desc);
- if (is_vmalloc_addr(sbi->s_flex_groups))
- vfree(sbi->s_flex_groups);
- else
- kfree(sbi->s_flex_groups);
+ ext4_kvfree(sbi->s_group_desc);
+ ext4_kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -1976,15 +2003,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
size = flex_group_count * sizeof(struct flex_groups);
- sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
+ sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
if (sbi->s_flex_groups == NULL) {
- sbi->s_flex_groups = vzalloc(size);
- if (sbi->s_flex_groups == NULL) {
- ext4_msg(sb, KERN_ERR,
- "not enough memory for %u flex groups",
- flex_group_count);
- goto failed;
- }
+ ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups",
+ flex_group_count);
+ goto failed;
}
for (i = 0; i < sbi->s_groups_count; i++) {
@@ -2383,17 +2406,25 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
unsigned long stripe_width =
le32_to_cpu(sbi->s_es->s_raid_stripe_width);
+ int ret;
if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
- return sbi->s_stripe;
-
- if (stripe_width <= sbi->s_blocks_per_group)
- return stripe_width;
+ ret = sbi->s_stripe;
+ else if (stripe_width <= sbi->s_blocks_per_group)
+ ret = stripe_width;
+ else if (stride <= sbi->s_blocks_per_group)
+ ret = stride;
+ else
+ ret = 0;
- if (stride <= sbi->s_blocks_per_group)
- return stride;
+ /*
+ * If the stripe width is 1, this makes no sense and
+ * we set it to 0 to turn off stripe handling code.
+ */
+ if (ret <= 1)
+ ret = 0;
- return 0;
+ return ret;
}
/* sysfs supprt */
@@ -3408,8 +3439,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
- sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
- GFP_KERNEL);
+ sbi->s_group_desc = ext4_kvmalloc(db_count *
+ sizeof(struct buffer_head *),
+ GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
ext4_msg(sb, KERN_ERR, "not enough memory");
goto failed_mount;
@@ -3491,7 +3523,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
- mutex_init(&sbi->s_resize_lock);
+ sbi->s_resize_flags = 0;
sb->s_root = NULL;
@@ -3741,12 +3773,8 @@ failed_mount_wq:
}
failed_mount3:
del_timer(&sbi->s_err_report);
- if (sbi->s_flex_groups) {
- if (is_vmalloc_addr(sbi->s_flex_groups))
- vfree(sbi->s_flex_groups);
- else
- kfree(sbi->s_flex_groups);
- }
+ if (sbi->s_flex_groups)
+ ext4_kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -3756,7 +3784,7 @@ failed_mount3:
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
- kfree(sbi->s_group_desc);
+ ext4_kvfree(sbi->s_group_desc);
failed_mount:
if (sbi->s_proc) {
remove_proc_entry(sb->s_id, ext4_proc_root);
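ext4_kvmalloc()/ext4_kvzalloc() above try kmalloc()/kzalloc() first and fall back to __vmalloc() when a large physically contiguous allocation fails, while ext4_kvfree() picks vfree() or kfree() via is_vmalloc_addr(). A short usage sketch of the helpers; the descs array and nr_groups are illustrative, not from this patch:

/* Hypothetical sketch of the ext4_kv* helpers introduced above. */
struct buffer_head **descs;

descs = ext4_kvzalloc(nr_groups * sizeof(struct buffer_head *), GFP_KERNEL);
if (!descs)
        return -ENOMEM;
/* ... populate and use descs ... */
ext4_kvfree(descs);     /* frees with vfree() or kfree(), whichever matches */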
diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
new file mode 100644
index 00000000000..011ba6670d9
--- /dev/null
+++ b/fs/ext4/truncate.h
@@ -0,0 +1,43 @@
+/*
+ * linux/fs/ext4/truncate.h
+ *
+ * Common inline functions needed for truncate support
+ */
+
+/*
+ * Truncate blocks that were not used by write. We have to truncate the
+ * pagecache as well so that corresponding buffers get properly unmapped.
+ */
+static inline void ext4_truncate_failed_write(struct inode *inode)
+{
+ truncate_inode_pages(inode->i_mapping, inode->i_size);
+ ext4_truncate(inode);
+}
+
+/*
+ * Work out how many blocks we need to proceed with the next chunk of a
+ * truncate transaction.
+ */
+static inline unsigned long ext4_blocks_for_truncate(struct inode *inode)
+{
+ ext4_lblk_t needed;
+
+ needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
+
+ /* Give ourselves just enough room to cope with inodes in which
+ * i_blocks is corrupt: we've seen disk corruptions in the past
+ * which resulted in random data in an inode which looked enough
+ * like a regular file for ext4 to try to delete it. Things
+ * will go a bit crazy if that happens, but at least we should
+ * try not to panic the whole kernel. */
+ if (needed < 2)
+ needed = 2;
+
+ /* But we need to bound the transaction so we don't overflow the
+ * journal. */
+ if (needed > EXT4_MAX_TRANS_DATA)
+ needed = EXT4_MAX_TRANS_DATA;
+
+ return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
+}
+
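ext4_blocks_for_truncate() bounds the credits for one chunk of a truncate between 2 and EXT4_MAX_TRANS_DATA, plus the usual data-transaction overhead. A caller would typically size its journal handle with that estimate, roughly as in the sketch below (the converted callers are elsewhere in the series; error handling trimmed):

/* Hypothetical sketch: start a handle sized for the next truncate chunk. */
handle_t *handle;

handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode));
if (IS_ERR(handle))
        return PTR_ERR(handle);
/* ... free blocks, restarting the handle as needed, then ... */
ext4_journal_stop(handle);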
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1599aa985fe..04cf3b91e50 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -618,7 +618,12 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
struct super_block *sb = inode->i_sb;
if (!grab_super_passive(sb)) {
- requeue_io(inode, wb);
+ /*
+ * grab_super_passive() may fail consistently due to
+ * s_umount being grabbed by someone else. Don't use
+ * requeue_io() to avoid busy retrying the inode/sb.
+ */
+ redirty_tail(inode, wb);
continue;
}
wrote += writeback_sb_inodes(sb, wb, work);
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index d5e33a077a6..d0dddaceac5 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -82,18 +82,14 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
return PTR_ERR(acl);
}
if (acl) {
- mode_t mode;
-
error = posix_acl_valid(acl);
if (error)
goto failed;
switch (type) {
case ACL_TYPE_ACCESS:
- mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
goto failed;
- inode->i_mode = mode;
inode->i_ctime = CURRENT_TIME;
if (error == 0) {
posix_acl_release(acl);
@@ -125,21 +121,20 @@ int
generic_acl_init(struct inode *inode, struct inode *dir)
{
struct posix_acl *acl = NULL;
- mode_t mode = inode->i_mode;
int error;
- inode->i_mode = mode & ~current_umask();
if (!S_ISLNK(inode->i_mode))
acl = get_cached_acl(dir, ACL_TYPE_DEFAULT);
if (acl) {
if (S_ISDIR(inode->i_mode))
set_cached_acl(inode, ACL_TYPE_DEFAULT, acl);
- error = posix_acl_create(&acl, GFP_KERNEL, &mode);
+ error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
if (error < 0)
return error;
- inode->i_mode = mode;
if (error > 0)
set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
+ } else {
+ inode->i_mode &= ~current_umask();
}
error = 0;
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 884c9af0542..34501b64bc4 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -72,7 +72,7 @@ struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
return gfs2_acl_get(GFS2_I(inode), type);
}
-static int gfs2_set_mode(struct inode *inode, mode_t mode)
+static int gfs2_set_mode(struct inode *inode, umode_t mode)
{
int error = 0;
@@ -117,7 +117,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct posix_acl *acl;
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
int error = 0;
if (!sdp->sd_args.ar_posix_acl)
@@ -276,7 +276,7 @@ static int gfs2_xattr_system_set(struct dentry *dentry, const char *name,
goto out_release;
if (type == ACL_TYPE_ACCESS) {
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
error = posix_acl_equiv_mode(acl, &mode);
if (error <= 0) {
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 8635be5ffd9..970ea987b3f 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -16,6 +16,7 @@
#include <linux/statfs.h>
#include <linux/types.h>
#include <linux/pid_namespace.h>
+#include <linux/namei.h>
#include <asm/uaccess.h>
#include "os.h"
diff --git a/fs/inode.c b/fs/inode.c
index d0c72ff6b30..73920d555c8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -143,6 +143,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_op = &empty_iops;
inode->i_fop = &empty_fops;
inode->i_nlink = 1;
+ inode->i_opflags = 0;
inode->i_uid = 0;
inode->i_gid = 0;
atomic_set(&inode->i_writecount, 0);
@@ -399,12 +400,12 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
EXPORT_SYMBOL(__insert_inode_hash);
/**
- * remove_inode_hash - remove an inode from the hash
+ * __remove_inode_hash - remove an inode from the hash
* @inode: inode to unhash
*
* Remove an inode from the superblock.
*/
-void remove_inode_hash(struct inode *inode)
+void __remove_inode_hash(struct inode *inode)
{
spin_lock(&inode_hash_lock);
spin_lock(&inode->i_lock);
@@ -412,7 +413,7 @@ void remove_inode_hash(struct inode *inode)
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
}
-EXPORT_SYMBOL(remove_inode_hash);
+EXPORT_SYMBOL(__remove_inode_hash);
void end_writeback(struct inode *inode)
{
@@ -454,7 +455,9 @@ static void evict(struct inode *inode)
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(!list_empty(&inode->i_lru));
- inode_wb_list_del(inode);
+ if (!list_empty(&inode->i_wb_list))
+ inode_wb_list_del(inode);
+
inode_sb_list_del(inode);
if (op->evict_inode) {
@@ -1328,7 +1331,8 @@ static void iput_final(struct inode *inode)
}
inode->i_state |= I_FREEING;
- inode_lru_list_del(inode);
+ if (!list_empty(&inode->i_lru))
+ inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
evict(inode);
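The remove_inode_hash() to __remove_inode_hash() rename pairs with a header-side wrapper that is not included in this section; if memory serves, the public helper becomes an inline in include/linux/fs.h that skips the hash lock for inodes that were never hashed. Treat the exact form below as an assumption:

/* Assumed counterpart in include/linux/fs.h (not shown in this diff). */
static inline void remove_inode_hash(struct inode *inode)
{
        if (!inode_unhashed(inode))
                __remove_inode_hash(inode);
}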
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 2c62c5aae82..16a698bd906 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -257,9 +257,12 @@ static void
__flush_batch(journal_t *journal, int *batch_count)
{
int i;
+ struct blk_plug plug;
+ blk_start_plug(&plug);
for (i = 0; i < *batch_count; i++)
- write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE);
+ write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC);
+ blk_finish_plug(&plug);
for (i = 0; i < *batch_count; i++) {
struct buffer_head *bh = journal->j_chkpt_bhs[i];
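The checkpoint change above batches its now-WRITE_SYNC buffer writes under a block plug so the requests can be merged before reaching the queue. The same idiom applies to any burst of small submissions; a generic sketch mirroring __flush_batch(), with illustrative function and array names:

/* Hypothetical sketch of plugged batch submission. */
#include <linux/blkdev.h>
#include <linux/buffer_head.h>

static void submit_batch(struct buffer_head **bhs, int count)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* hold requests in a per-task list */
        for (i = 0; i < count; i++)
                write_dirty_buffer(bhs[i], WRITE_SYNC);
        blk_finish_plug(&plug);         /* dispatch the merged batch */
}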
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 0dfa5b598e6..f24df13adc4 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2390,73 +2390,6 @@ static void __exit journal_exit(void)
jbd2_journal_destroy_caches();
}
-/*
- * jbd2_dev_to_name is a utility function used by the jbd2 and ext4
- * tracing infrastructure to map a dev_t to a device name.
- *
- * The caller should use rcu_read_lock() in order to make sure the
- * device name stays valid until its done with it. We use
- * rcu_read_lock() as well to make sure we're safe in case the caller
- * gets sloppy, and because rcu_read_lock() is cheap and can be safely
- * nested.
- */
-struct devname_cache {
- struct rcu_head rcu;
- dev_t device;
- char devname[BDEVNAME_SIZE];
-};
-#define CACHE_SIZE_BITS 6
-static struct devname_cache *devcache[1 << CACHE_SIZE_BITS];
-static DEFINE_SPINLOCK(devname_cache_lock);
-
-static void free_devcache(struct rcu_head *rcu)
-{
- kfree(rcu);
-}
-
-const char *jbd2_dev_to_name(dev_t device)
-{
- int i = hash_32(device, CACHE_SIZE_BITS);
- char *ret;
- struct block_device *bd;
- static struct devname_cache *new_dev;
-
- rcu_read_lock();
- if (devcache[i] && devcache[i]->device == device) {
- ret = devcache[i]->devname;
- rcu_read_unlock();
- return ret;
- }
- rcu_read_unlock();
-
- new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
- if (!new_dev)
- return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
- bd = bdget(device);
- spin_lock(&devname_cache_lock);
- if (devcache[i]) {
- if (devcache[i]->device == device) {
- kfree(new_dev);
- bdput(bd);
- ret = devcache[i]->devname;
- spin_unlock(&devname_cache_lock);
- return ret;
- }
- call_rcu(&devcache[i]->rcu, free_devcache);
- }
- devcache[i] = new_dev;
- devcache[i]->device = device;
- if (bd) {
- bdevname(bd, devcache[i]->devname);
- bdput(bd);
- } else
- __bdevname(device, devcache[i]->devname);
- ret = devcache[i]->devname;
- spin_unlock(&devname_cache_lock);
- return ret;
-}
-EXPORT_SYMBOL(jbd2_dev_to_name);
-
MODULE_LICENSE("GPL");
module_init(journal_init);
module_exit(journal_exit);
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 27c511a1cf0..926d02068a1 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -227,7 +227,7 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
case ACL_TYPE_ACCESS:
xprefix = JFFS2_XPREFIX_ACL_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
rc = posix_acl_equiv_mode(acl, &mode);
if (rc < 0)
return rc;
@@ -259,7 +259,7 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
return rc;
}
-int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, mode_t *i_mode)
+int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, umode_t *i_mode)
{
struct posix_acl *acl;
int rc;
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index b3421c78d9f..9b477246f2a 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -28,7 +28,7 @@ struct jffs2_acl_header {
struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
extern int jffs2_acl_chmod(struct inode *);
-extern int jffs2_init_acl_pre(struct inode *, struct inode *, mode_t *);
+extern int jffs2_init_acl_pre(struct inode *, struct inode *, umode_t *);
extern int jffs2_init_acl_post(struct inode *);
extern const struct xattr_handler jffs2_acl_access_xattr_handler;
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index b81b35ddf4e..bbcb9755dd2 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -406,7 +406,7 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
fill in the raw_inode while you're at it. */
-struct inode *jffs2_new_inode (struct inode *dir_i, mode_t mode, struct jffs2_raw_inode *ri)
+struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
struct inode *inode;
struct super_block *sb = dir_i->i_sb;
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 526979c607b..6c1755c59c0 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -173,7 +173,7 @@ int jffs2_do_setattr (struct inode *, struct iattr *);
struct inode *jffs2_iget(struct super_block *, unsigned long);
void jffs2_evict_inode (struct inode *);
void jffs2_dirty_inode(struct inode *inode, int flags);
-struct inode *jffs2_new_inode (struct inode *dir_i, mode_t mode,
+struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode,
struct jffs2_raw_inode *ri);
int jffs2_statfs (struct dentry *, struct kstatfs *);
int jffs2_remount_fs (struct super_block *, int *, char *);
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index b3a32caf2b4..45559dc3ea2 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -127,16 +127,14 @@ int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir)
return PTR_ERR(acl);
if (acl) {
- mode_t mode = inode->i_mode;
if (S_ISDIR(inode->i_mode)) {
rc = jfs_set_acl(tid, inode, ACL_TYPE_DEFAULT, acl);
if (rc)
goto cleanup;
}
- rc = posix_acl_create(&acl, GFP_KERNEL, &mode);
+ rc = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
if (rc < 0)
goto cleanup; /* posix_acl_release(NULL) is no-op */
- inode->i_mode = mode;
if (rc > 0)
rc = jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, acl);
cleanup:
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 24838f1eeee..e87fedef23d 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -693,8 +693,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
return rc;
}
if (acl) {
- mode_t mode = inode->i_mode;
- rc = posix_acl_equiv_mode(acl, &mode);
+ rc = posix_acl_equiv_mode(acl, &inode->i_mode);
posix_acl_release(acl);
if (rc < 0) {
printk(KERN_ERR
@@ -702,7 +701,6 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
rc);
return rc;
}
- inode->i_mode = mode;
mark_inode_dirty(inode);
}
/*
diff --git a/fs/namei.c b/fs/namei.c
index f8c69d37379..2826db35dc2 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -179,19 +179,14 @@ static int check_acl(struct inode *inode, int mask)
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *acl;
- /*
- * Under RCU walk, we cannot even do a "get_cached_acl()",
- * because that involves locking and getting a refcount on
- * a cached ACL.
- *
- * So the only case we handle during RCU walking is the
- * case of a cached "no ACL at all", which needs no locks
- * or refcounts.
- */
if (mask & MAY_NOT_BLOCK) {
- if (negative_cached_acl(inode, ACL_TYPE_ACCESS))
+ acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
+ if (!acl)
return -EAGAIN;
- return -ECHILD;
+ /* no ->get_acl() calls in RCU mode... */
+ if (acl == ACL_NOT_CACHED)
+ return -ECHILD;
+ return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
}
acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
@@ -313,6 +308,26 @@ int generic_permission(struct inode *inode, int mask)
return -EACCES;
}
+/*
+ * We _really_ want to just do "generic_permission()" without
+ * even looking at the inode->i_op values. So we keep a cache
+ * flag in inode->i_opflags, that says "this has not special
+ * permission function, use the fast case".
+ */
+static inline int do_inode_permission(struct inode *inode, int mask)
+{
+ if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
+ if (likely(inode->i_op->permission))
+ return inode->i_op->permission(inode, mask);
+
+ /* This gets set once for the inode lifetime */
+ spin_lock(&inode->i_lock);
+ inode->i_opflags |= IOP_FASTPERM;
+ spin_unlock(&inode->i_lock);
+ }
+ return generic_permission(inode, mask);
+}
+
/**
* inode_permission - check for access rights to a given inode
* @inode: inode to check permission on
@@ -327,7 +342,7 @@ int inode_permission(struct inode *inode, int mask)
{
int retval;
- if (mask & MAY_WRITE) {
+ if (unlikely(mask & MAY_WRITE)) {
umode_t mode = inode->i_mode;
/*
@@ -344,11 +359,7 @@ int inode_permission(struct inode *inode, int mask)
return -EACCES;
}
- if (inode->i_op->permission)
- retval = inode->i_op->permission(inode, mask);
- else
- retval = generic_permission(inode, mask);
-
+ retval = do_inode_permission(inode, mask);
if (retval)
return retval;
@@ -716,19 +727,25 @@ static int follow_automount(struct path *path, unsigned flags,
if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
return -EISDIR; /* we actually want to stop here */
- /* We want to mount if someone is trying to open/create a file of any
- * type under the mountpoint, wants to traverse through the mountpoint
- * or wants to open the mounted directory.
- *
+ /*
* We don't want to mount if someone's just doing a stat and they've
* set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and
* appended a '/' to the name.
*/
- if (!(flags & LOOKUP_FOLLOW) &&
- !(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
- LOOKUP_OPEN | LOOKUP_CREATE)))
- return -EISDIR;
-
+ if (!(flags & LOOKUP_FOLLOW)) {
+ /* We do, however, want to mount if someone wants to open or
+ * create a file of any type under the mountpoint, wants to
+ * traverse through the mountpoint or wants to open the mounted
+ * directory.
+ * Also, autofs may mark negative dentries as being automount
+ * points. These will need the attentions of the daemon to
+ * instantiate them before they can be used.
+ */
+ if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
+ LOOKUP_OPEN | LOOKUP_CREATE)) &&
+ path->dentry->d_inode)
+ return -EISDIR;
+ }
current->total_link_count++;
if (current->total_link_count >= 40)
return -ELOOP;
@@ -1244,6 +1261,26 @@ static void terminate_walk(struct nameidata *nd)
}
}
+/*
+ * Do we need to follow links? We _really_ want to be able
+ * to do this check without having to look at inode->i_op,
+ * so we keep a cache of "no, this doesn't need follow_link"
+ * for the common case.
+ */
+static inline int should_follow_link(struct inode *inode, int follow)
+{
+ if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
+ if (likely(inode->i_op->follow_link))
+ return follow;
+
+ /* This gets set once for the inode lifetime */
+ spin_lock(&inode->i_lock);
+ inode->i_opflags |= IOP_NOFOLLOW;
+ spin_unlock(&inode->i_lock);
+ }
+ return 0;
+}
+
static inline int walk_component(struct nameidata *nd, struct path *path,
struct qstr *name, int type, int follow)
{
@@ -1266,7 +1303,7 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
terminate_walk(nd);
return -ENOENT;
}
- if (unlikely(inode->i_op->follow_link) && follow) {
+ if (should_follow_link(inode, follow)) {
if (nd->flags & LOOKUP_RCU) {
if (unlikely(unlazy_walk(nd, path->dentry))) {
terminate_walk(nd);
@@ -1319,6 +1356,26 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
}
/*
+ * We really don't want to look at inode->i_op->lookup
+ * when we don't have to. So we keep a cache bit in
+ * the inode ->i_opflags field that says "yes, we can
+ * do lookup on this inode".
+ */
+static inline int can_lookup(struct inode *inode)
+{
+ if (likely(inode->i_opflags & IOP_LOOKUP))
+ return 1;
+ if (likely(!inode->i_op->lookup))
+ return 0;
+
+ /* We do this once for the lifetime of the inode */
+ spin_lock(&inode->i_lock);
+ inode->i_opflags |= IOP_LOOKUP;
+ spin_unlock(&inode->i_lock);
+ return 1;
+}
+
+/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect 'base' to be positive and a directory.
@@ -1397,10 +1454,10 @@ static int link_path_walk(const char *name, struct nameidata *nd)
if (err)
return err;
}
+ if (can_lookup(nd->inode))
+ continue;
err = -ENOTDIR;
- if (!nd->inode->i_op->lookup)
- break;
- continue;
+ break;
/* here ends the main loop */
last_component:
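The three helpers added to namei.c above (do_inode_permission, should_follow_link, can_lookup) share one idiom: test a cached IOP_* bit in i_opflags, fall back to the inode_operations method when one exists, and latch the bit once the inode is known not to provide it. A condensed sketch of that pattern; IOP_EXAMPLE, ->example() and generic_example() are illustrative names, not real kernel symbols:

/* Hypothetical sketch of the i_opflags caching idiom. */
static inline int do_example(struct inode *inode, int arg)
{
        if (unlikely(!(inode->i_opflags & IOP_EXAMPLE))) {
                if (likely(inode->i_op->example))
                        return inode->i_op->example(inode, arg);

                /* i_op never changes once the inode is set up, so the
                 * cached "no method" answer is set once per lifetime. */
                spin_lock(&inode->i_lock);
                inode->i_opflags |= IOP_EXAMPLE;
                spin_unlock(&inode->i_lock);
        }
        return generic_example(inode, arg);
}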
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 2cde5d95475..be020771c6b 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -79,15 +79,21 @@ config NFS_V4_1
depends on NFS_FS && NFS_V4 && EXPERIMENTAL
select SUNRPC_BACKCHANNEL
select PNFS_FILE_LAYOUT
+ select PNFS_BLOCK
+ select MD
+ select BLK_DEV_DM
help
This option enables support for minor version 1 of the NFSv4 protocol
- (RFC 5661) in the kernel's NFS client.
+ (RFC 5661 and RFC 5663) in the kernel's NFS client.
If unsure, say N.
config PNFS_FILE_LAYOUT
tristate
+config PNFS_BLOCK
+ tristate
+
config PNFS_OBJLAYOUT
tristate "Provide support for the pNFS Objects Layout Driver for NFSv4.1 pNFS (EXPERIMENTAL)"
depends on NFS_FS && NFS_V4_1 && SCSI_OSD_ULD
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 6a34f7dd0e6..b58613d0abb 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -23,3 +23,4 @@ obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
obj-$(CONFIG_PNFS_OBJLAYOUT) += objlayout/
+obj-$(CONFIG_PNFS_BLOCK) += blocklayout/
diff --git a/fs/nfs/blocklayout/Makefile b/fs/nfs/blocklayout/Makefile
new file mode 100644
index 00000000000..d5815505c02
--- /dev/null
+++ b/fs/nfs/blocklayout/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the pNFS block layout driver kernel module
+#
+obj-$(CONFIG_PNFS_BLOCK) += blocklayoutdriver.o
+blocklayoutdriver-objs := blocklayout.o extents.o blocklayoutdev.o blocklayoutdm.o
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
new file mode 100644
index 00000000000..e56564d2ef9
--- /dev/null
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -0,0 +1,1019 @@
+/*
+ * linux/fs/nfs/blocklayout/blocklayout.c
+ *
+ * Module for the NFSv4.1 pNFS block layout driver.
+ *
+ * Copyright (c) 2006 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@citi.umich.edu>
+ * Fred Isaman <iisaman@umich.edu>
+ *
+ * permission is granted to use, copy, create derivative works and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the university of michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. if
+ * the above copyright notice or any other identification of the
+ * university of michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * this software is provided as is, without representation from the
+ * university of michigan as to its fitness for any purpose, and without
+ * warranty by the university of michigan of any kind, either express
+ * or implied, including without limitation the implied warranties of
+ * merchantability and fitness for a particular purpose. the regents
+ * of the university of michigan shall not be liable for any damages,
+ * including special, indirect, incidental, or consequential damages,
+ * with respect to any claim arising out or in connection with the use
+ * of the software, even if it has been or is hereafter advised of the
+ * possibility of such damages.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/bio.h> /* struct bio */
+#include <linux/buffer_head.h> /* various write calls */
+
+#include "blocklayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
+MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
+
+struct dentry *bl_device_pipe;
+wait_queue_head_t bl_wq;
+
+static void print_page(struct page *page)
+{
+ dprintk("PRINTPAGE page %p\n", page);
+ dprintk(" PagePrivate %d\n", PagePrivate(page));
+ dprintk(" PageUptodate %d\n", PageUptodate(page));
+ dprintk(" PageError %d\n", PageError(page));
+ dprintk(" PageDirty %d\n", PageDirty(page));
+ dprintk(" PageReferenced %d\n", PageReferenced(page));
+ dprintk(" PageLocked %d\n", PageLocked(page));
+ dprintk(" PageWriteback %d\n", PageWriteback(page));
+ dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page));
+ dprintk("\n");
+}
+
+/* Given the be associated with isect, determine if page data needs to be
+ * initialized.
+ */
+static int is_hole(struct pnfs_block_extent *be, sector_t isect)
+{
+ if (be->be_state == PNFS_BLOCK_NONE_DATA)
+ return 1;
+ else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
+ return 0;
+ else
+ return !bl_is_sector_init(be->be_inval, isect);
+}
+
+/* Given the be associated with isect, determine if page data can be
+ * written to disk.
+ */
+static int is_writable(struct pnfs_block_extent *be, sector_t isect)
+{
+ return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
+ be->be_state == PNFS_BLOCK_INVALID_DATA);
+}
+
+/* The data we are handed might be spread across several bios. We need
+ * to track when the last one is finished.
+ */
+struct parallel_io {
+ struct kref refcnt;
+ struct rpc_call_ops call_ops;
+ void (*pnfs_callback) (void *data);
+ void *data;
+};
+
+static inline struct parallel_io *alloc_parallel(void *data)
+{
+ struct parallel_io *rv;
+
+ rv = kmalloc(sizeof(*rv), GFP_NOFS);
+ if (rv) {
+ rv->data = data;
+ kref_init(&rv->refcnt);
+ }
+ return rv;
+}
+
+static inline void get_parallel(struct parallel_io *p)
+{
+ kref_get(&p->refcnt);
+}
+
+static void destroy_parallel(struct kref *kref)
+{
+ struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);
+
+ dprintk("%s enter\n", __func__);
+ p->pnfs_callback(p->data);
+ kfree(p);
+}
+
+static inline void put_parallel(struct parallel_io *p)
+{
+ kref_put(&p->refcnt, destroy_parallel);
+}
+
+static struct bio *
+bl_submit_bio(int rw, struct bio *bio)
+{
+ if (bio) {
+ get_parallel(bio->bi_private);
+ dprintk("%s submitting %s bio %u@%llu\n", __func__,
+ rw == READ ? "read" : "write",
+ bio->bi_size, (unsigned long long)bio->bi_sector);
+ submit_bio(rw, bio);
+ }
+ return NULL;
+}
+
+static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+ struct parallel_io *par)
+{
+ struct bio *bio;
+
+ bio = bio_alloc(GFP_NOIO, npg);
+ if (!bio)
+ return NULL;
+
+ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+ bio->bi_bdev = be->be_mdev;
+ bio->bi_end_io = end_io;
+ bio->bi_private = par;
+ return bio;
+}
+
+static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
+ sector_t isect, struct page *page,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+ struct parallel_io *par)
+{
+retry:
+ if (!bio) {
+ bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+ }
+ if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+ bio = bl_submit_bio(rw, bio);
+ goto retry;
+ }
+ return bio;
+}
+
+static void bl_set_lo_fail(struct pnfs_layout_segment *lseg)
+{
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+ dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ } else {
+ dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ }
+}
+
+/* This is basically copied from mpage_end_io_read */
+static void bl_end_io_read(struct bio *bio, int err)
+{
+ struct parallel_io *par = bio->bi_private;
+ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;
+
+ do {
+ struct page *page = bvec->bv_page;
+
+ if (--bvec >= bio->bi_io_vec)
+ prefetchw(&bvec->bv_page->flags);
+ if (uptodate)
+ SetPageUptodate(page);
+ } while (bvec >= bio->bi_io_vec);
+ if (!uptodate) {
+ if (!rdata->pnfs_error)
+ rdata->pnfs_error = -EIO;
+ bl_set_lo_fail(rdata->lseg);
+ }
+ bio_put(bio);
+ put_parallel(par);
+}
+
+static void bl_read_cleanup(struct work_struct *work)
+{
+ struct rpc_task *task;
+ struct nfs_read_data *rdata;
+ dprintk("%s enter\n", __func__);
+ task = container_of(work, struct rpc_task, u.tk_work);
+ rdata = container_of(task, struct nfs_read_data, task);
+ pnfs_ld_read_done(rdata);
+}
+
+static void
+bl_end_par_io_read(void *data)
+{
+ struct nfs_read_data *rdata = data;
+
+ INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
+ schedule_work(&rdata->task.u.tk_work);
+}
+
+/* We don't want normal .rpc_call_done callback used, so we replace it
+ * with this stub.
+ */
+static void bl_rpc_do_nothing(struct rpc_task *task, void *calldata)
+{
+ return;
+}
+
+static enum pnfs_try_status
+bl_read_pagelist(struct nfs_read_data *rdata)
+{
+ int i, hole;
+ struct bio *bio = NULL;
+ struct pnfs_block_extent *be = NULL, *cow_read = NULL;
+ sector_t isect, extent_length = 0;
+ struct parallel_io *par;
+ loff_t f_offset = rdata->args.offset;
+ size_t count = rdata->args.count;
+ struct page **pages = rdata->args.pages;
+ int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
+
+ dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__,
+ rdata->npages, f_offset, count);
+
+ par = alloc_parallel(rdata);
+ if (!par)
+ goto use_mds;
+ par->call_ops = *rdata->mds_ops;
+ par->call_ops.rpc_call_done = bl_rpc_do_nothing;
+ par->pnfs_callback = bl_end_par_io_read;
+ /* At this point, we can no longer jump to use_mds */
+
+ isect = (sector_t) (f_offset >> SECTOR_SHIFT);
+ /* Code assumes extents are page-aligned */
+ for (i = pg_index; i < rdata->npages; i++) {
+ if (!extent_length) {
+ /* We've used up the previous extent */
+ bl_put_extent(be);
+ bl_put_extent(cow_read);
+ bio = bl_submit_bio(READ, bio);
+ /* Get the next one */
+ be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
+ isect, &cow_read);
+ if (!be) {
+ rdata->pnfs_error = -EIO;
+ goto out;
+ }
+ extent_length = be->be_length -
+ (isect - be->be_f_offset);
+ if (cow_read) {
+ sector_t cow_length = cow_read->be_length -
+ (isect - cow_read->be_f_offset);
+ extent_length = min(extent_length, cow_length);
+ }
+ }
+ hole = is_hole(be, isect);
+ if (hole && !cow_read) {
+ bio = bl_submit_bio(READ, bio);
+ /* Fill hole w/ zeroes w/o accessing device */
+ dprintk("%s Zeroing page for hole\n", __func__);
+ zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
+ print_page(pages[i]);
+ SetPageUptodate(pages[i]);
+ } else {
+ struct pnfs_block_extent *be_read;
+
+ be_read = (hole && cow_read) ? cow_read : be;
+ bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
+ isect, pages[i], be_read,
+ bl_end_io_read, par);
+ if (IS_ERR(bio)) {
+ rdata->pnfs_error = PTR_ERR(bio);
+ goto out;
+ }
+ }
+ isect += PAGE_CACHE_SECTORS;
+ extent_length -= PAGE_CACHE_SECTORS;
+ }
+ if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
+ rdata->res.eof = 1;
+ rdata->res.count = rdata->inode->i_size - f_offset;
+ } else {
+ rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
+ }
+out:
+ bl_put_extent(be);
+ bl_put_extent(cow_read);
+ bl_submit_bio(READ, bio);
+ put_parallel(par);
+ return PNFS_ATTEMPTED;
+
+ use_mds:
+ dprintk("Giving up and using normal NFS\n");
+ return PNFS_NOT_ATTEMPTED;
+}
+
+static void mark_extents_written(struct pnfs_block_layout *bl,
+ __u64 offset, __u32 count)
+{
+ sector_t isect, end;
+ struct pnfs_block_extent *be;
+
+ dprintk("%s(%llu, %u)\n", __func__, offset, count);
+ if (count == 0)
+ return;
+ isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
+ end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
+ end >>= SECTOR_SHIFT;
+ while (isect < end) {
+ sector_t len;
+ be = bl_find_get_extent(bl, isect, NULL);
+ BUG_ON(!be); /* FIXME */
+ len = min(end, be->be_f_offset + be->be_length) - isect;
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA)
+ bl_mark_for_commit(be, isect, len); /* What if fails? */
+ isect += len;
+ bl_put_extent(be);
+ }
+}
+
+static void bl_end_io_write_zero(struct bio *bio, int err)
+{
+ struct parallel_io *par = bio->bi_private;
+ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
+
+ do {
+ struct page *page = bvec->bv_page;
+
+ if (--bvec >= bio->bi_io_vec)
+ prefetchw(&bvec->bv_page->flags);
+ /* This is the zeroing page we added */
+ end_page_writeback(page);
+ page_cache_release(page);
+ } while (bvec >= bio->bi_io_vec);
+ if (!uptodate) {
+ if (!wdata->pnfs_error)
+ wdata->pnfs_error = -EIO;
+ bl_set_lo_fail(wdata->lseg);
+ }
+ bio_put(bio);
+ put_parallel(par);
+}
+
+/* This is basically copied from mpage_end_io_read */
+static void bl_end_io_write(struct bio *bio, int err)
+{
+ struct parallel_io *par = bio->bi_private;
+ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
+
+ if (!uptodate) {
+ if (!wdata->pnfs_error)
+ wdata->pnfs_error = -EIO;
+ bl_set_lo_fail(wdata->lseg);
+ }
+ bio_put(bio);
+ put_parallel(par);
+}
+
+/* Function scheduled for call during bl_end_par_io_write,
+ * it marks sectors as written and extends the commitlist.
+ */
+static void bl_write_cleanup(struct work_struct *work)
+{
+ struct rpc_task *task;
+ struct nfs_write_data *wdata;
+ dprintk("%s enter\n", __func__);
+ task = container_of(work, struct rpc_task, u.tk_work);
+ wdata = container_of(task, struct nfs_write_data, task);
+ if (!wdata->pnfs_error) {
+ /* Marks for LAYOUTCOMMIT */
+ mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+ wdata->args.offset, wdata->args.count);
+ }
+ pnfs_ld_write_done(wdata);
+}
+
+/* Called when last of bios associated with a bl_write_pagelist call finishes */
+static void bl_end_par_io_write(void *data)
+{
+ struct nfs_write_data *wdata = data;
+
+ wdata->task.tk_status = 0;
+ wdata->verf.committed = NFS_FILE_SYNC;
+ INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
+ schedule_work(&wdata->task.u.tk_work);
+}
+
+/* FIXME STUB - mark intersection of layout and page as bad, so is not
+ * used again.
+ */
+static void mark_bad_read(void)
+{
+ return;
+}
+
+/*
+ * map_block: map a requested I/O block (isect) into an offset in the LVM
+ * block_device
+ */
+static void
+map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
+{
+ dprintk("%s enter be=%p\n", __func__, be);
+
+ set_buffer_mapped(bh);
+ bh->b_bdev = be->be_mdev;
+ bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
+ (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);
+
+ dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
+ __func__, (unsigned long long)isect, (long)bh->b_blocknr,
+ bh->b_size);
+ return;
+}
+
+/* Given an unmapped page, zero it or read in page for COW, page is locked
+ * by caller.
+ */
+static int
+init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
+{
+ struct buffer_head *bh = NULL;
+ int ret = 0;
+ sector_t isect;
+
+ dprintk("%s enter, %p\n", __func__, page);
+ BUG_ON(PageUptodate(page));
+ if (!cow_read) {
+ zero_user_segment(page, 0, PAGE_SIZE);
+ SetPageUptodate(page);
+ goto cleanup;
+ }
+
+ bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
+ if (!bh) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
+ map_block(bh, isect, cow_read);
+ if (!bh_uptodate_or_lock(bh))
+ ret = bh_submit_read(bh);
+ if (ret)
+ goto cleanup;
+ SetPageUptodate(page);
+
+cleanup:
+ bl_put_extent(cow_read);
+ if (bh)
+ free_buffer_head(bh);
+ if (ret) {
+ /* Need to mark layout with bad read...should now
+ * just use nfs4 for reads and writes.
+ */
+ mark_bad_read();
+ }
+ return ret;
+}
+
+static enum pnfs_try_status
+bl_write_pagelist(struct nfs_write_data *wdata, int sync)
+{
+ int i, ret, npg_zero, pg_index, last = 0;
+ struct bio *bio = NULL;
+ struct pnfs_block_extent *be = NULL, *cow_read = NULL;
+ sector_t isect, last_isect = 0, extent_length = 0;
+ struct parallel_io *par;
+ loff_t offset = wdata->args.offset;
+ size_t count = wdata->args.count;
+ struct page **pages = wdata->args.pages;
+ struct page *page;
+ pgoff_t index;
+ u64 temp;
+ int npg_per_block =
+ NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
+
+ dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
+ /* At this point, wdata->pages is a (sequential) list of nfs_pages.
+ * We want to write each, and if there is an error set pnfs_error
+ * to have it redone using nfs.
+ */
+ par = alloc_parallel(wdata);
+ if (!par)
+ return PNFS_NOT_ATTEMPTED;
+ par->call_ops = *wdata->mds_ops;
+ par->call_ops.rpc_call_done = bl_rpc_do_nothing;
+ par->pnfs_callback = bl_end_par_io_write;
+ /* At this point, have to be more careful with error handling */
+
+ isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
+ be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
+ if (!be || !is_writable(be, isect)) {
+ dprintk("%s no matching extents!\n", __func__);
+ wdata->pnfs_error = -EINVAL;
+ goto out;
+ }
+
+ /* First page inside INVALID extent */
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+ temp = offset >> PAGE_CACHE_SHIFT;
+ npg_zero = do_div(temp, npg_per_block);
+ isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
+ (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
+ extent_length = be->be_length - (isect - be->be_f_offset);
+
+fill_invalid_ext:
+ dprintk("%s need to zero %d pages\n", __func__, npg_zero);
+ for (;npg_zero > 0; npg_zero--) {
+ /* page ref released in bl_end_io_write_zero */
+ index = isect >> PAGE_CACHE_SECTOR_SHIFT;
+ dprintk("%s zero %dth page: index %lu isect %llu\n",
+ __func__, npg_zero, index,
+ (unsigned long long)isect);
+ page =
+ find_or_create_page(wdata->inode->i_mapping, index,
+ GFP_NOFS);
+ if (!page) {
+ dprintk("%s oom\n", __func__);
+ wdata->pnfs_error = -ENOMEM;
+ goto out;
+ }
+
+ /* PageDirty: Other will write this out
+ * PageWriteback: Other is writing this out
+ * PageUptodate: It was read before
+ * sector_initialized: already written out
+ */
+ if (PageDirty(page) || PageWriteback(page) ||
+ bl_is_sector_init(be->be_inval, isect)) {
+ print_page(page);
+ unlock_page(page);
+ page_cache_release(page);
+ goto next_page;
+ }
+ if (!PageUptodate(page)) {
+ /* New page, readin or zero it */
+ init_page_for_write(page, cow_read);
+ }
+ set_page_writeback(page);
+ unlock_page(page);
+
+ ret = bl_mark_sectors_init(be->be_inval, isect,
+ PAGE_CACHE_SECTORS,
+ NULL);
+ if (unlikely(ret)) {
+ dprintk("%s bl_mark_sectors_init fail %d\n",
+ __func__, ret);
+ end_page_writeback(page);
+ page_cache_release(page);
+ wdata->pnfs_error = ret;
+ goto out;
+ }
+ bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
+ isect, page, be,
+ bl_end_io_write_zero, par);
+ if (IS_ERR(bio)) {
+ wdata->pnfs_error = PTR_ERR(bio);
+ goto out;
+ }
+ /* FIXME: This should be done in bi_end_io */
+ mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+ page->index << PAGE_CACHE_SHIFT,
+ PAGE_CACHE_SIZE);
+next_page:
+ isect += PAGE_CACHE_SECTORS;
+ extent_length -= PAGE_CACHE_SECTORS;
+ }
+ if (last)
+ goto write_done;
+ }
+ bio = bl_submit_bio(WRITE, bio);
+
+ /* Middle pages */
+ pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
+ for (i = pg_index; i < wdata->npages; i++) {
+ if (!extent_length) {
+ /* We've used up the previous extent */
+ bl_put_extent(be);
+ bio = bl_submit_bio(WRITE, bio);
+ /* Get the next one */
+ be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
+ isect, NULL);
+ if (!be || !is_writable(be, isect)) {
+ wdata->pnfs_error = -EINVAL;
+ goto out;
+ }
+ extent_length = be->be_length -
+ (isect - be->be_f_offset);
+ }
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+ ret = bl_mark_sectors_init(be->be_inval, isect,
+ PAGE_CACHE_SECTORS,
+ NULL);
+ if (unlikely(ret)) {
+ dprintk("%s bl_mark_sectors_init fail %d\n",
+ __func__, ret);
+ wdata->pnfs_error = ret;
+ goto out;
+ }
+ }
+ bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
+ isect, pages[i], be,
+ bl_end_io_write, par);
+ if (IS_ERR(bio)) {
+ wdata->pnfs_error = PTR_ERR(bio);
+ goto out;
+ }
+ isect += PAGE_CACHE_SECTORS;
+ last_isect = isect;
+ extent_length -= PAGE_CACHE_SECTORS;
+ }
+
+ /* Last page inside INVALID extent */
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+ bio = bl_submit_bio(WRITE, bio);
+ temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
+ npg_zero = npg_per_block - do_div(temp, npg_per_block);
+ if (npg_zero < npg_per_block) {
+ last = 1;
+ goto fill_invalid_ext;
+ }
+ }
+
+write_done:
+ wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
+ if (count < wdata->res.count) {
+ wdata->res.count = count;
+ }
+out:
+ bl_put_extent(be);
+ bl_submit_bio(WRITE, bio);
+ put_parallel(par);
+ return PNFS_ATTEMPTED;
+}
+
+/* FIXME - range ignored */
+static void
+release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
+{
+ int i;
+ struct pnfs_block_extent *be;
+
+ spin_lock(&bl->bl_ext_lock);
+ for (i = 0; i < EXTENT_LISTS; i++) {
+ while (!list_empty(&bl->bl_extents[i])) {
+ be = list_first_entry(&bl->bl_extents[i],
+ struct pnfs_block_extent,
+ be_node);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ }
+ }
+ spin_unlock(&bl->bl_ext_lock);
+}
+
+static void
+release_inval_marks(struct pnfs_inval_markings *marks)
+{
+ struct pnfs_inval_tracking *pos, *temp;
+
+ list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
+ list_del(&pos->it_link);
+ kfree(pos);
+ }
+ return;
+}
+
+static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
+
+ dprintk("%s enter\n", __func__);
+ release_extents(bl, NULL);
+ release_inval_marks(&bl->bl_inval);
+ kfree(bl);
+}
+
+static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
+ gfp_t gfp_flags)
+{
+ struct pnfs_block_layout *bl;
+
+ dprintk("%s enter\n", __func__);
+ bl = kzalloc(sizeof(*bl), gfp_flags);
+ if (!bl)
+ return NULL;
+ spin_lock_init(&bl->bl_ext_lock);
+ INIT_LIST_HEAD(&bl->bl_extents[0]);
+ INIT_LIST_HEAD(&bl->bl_extents[1]);
+ INIT_LIST_HEAD(&bl->bl_commit);
+ INIT_LIST_HEAD(&bl->bl_committing);
+ bl->bl_count = 0;
+ bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
+ BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
+ return &bl->bl_layout;
+}
+
+static void bl_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ dprintk("%s enter\n", __func__);
+ kfree(lseg);
+}
+
+/* We pretty much ignore lseg, and store all data layout wide, so we
+ * can correctly merge.
+ */
+static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
+ struct nfs4_layoutget_res *lgr,
+ gfp_t gfp_flags)
+{
+ struct pnfs_layout_segment *lseg;
+ int status;
+
+ dprintk("%s enter\n", __func__);
+ lseg = kzalloc(sizeof(*lseg), gfp_flags);
+ if (!lseg)
+ return ERR_PTR(-ENOMEM);
+ status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
+ if (status) {
+ /* We don't want to call the full-blown bl_free_lseg,
+ * since on error extents were not touched.
+ */
+ kfree(lseg);
+ return ERR_PTR(status);
+ }
+ return lseg;
+}
+
+static void
+bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *arg)
+{
+ dprintk("%s enter\n", __func__);
+ encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
+}
+
+static void
+bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
+{
+ struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;
+
+ dprintk("%s enter\n", __func__);
+ clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
+}
+
+static void free_blk_mountid(struct block_mount_id *mid)
+{
+ if (mid) {
+ struct pnfs_block_dev *dev;
+ spin_lock(&mid->bm_lock);
+ while (!list_empty(&mid->bm_devlist)) {
+ dev = list_first_entry(&mid->bm_devlist,
+ struct pnfs_block_dev,
+ bm_node);
+ list_del(&dev->bm_node);
+ bl_free_block_dev(dev);
+ }
+ spin_unlock(&mid->bm_lock);
+ kfree(mid);
+ }
+}
+
+/* This is mostly copied from the filelayout's get_device_info function.
+ * It seems much of this should be at the generic pnfs level.
+ */
+static struct pnfs_block_dev *
+nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
+ struct nfs4_deviceid *d_id)
+{
+ struct pnfs_device *dev;
+ struct pnfs_block_dev *rv = NULL;
+ u32 max_resp_sz;
+ int max_pages;
+ struct page **pages = NULL;
+ int i, rc;
+
+ /*
+ * Use the session max response size as the basis for setting
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ max_pages = max_resp_sz >> PAGE_SHIFT;
+ dprintk("%s max_resp_sz %u max_pages %d\n",
+ __func__, max_resp_sz, max_pages);
+
+ dev = kmalloc(sizeof(*dev), GFP_NOFS);
+ if (!dev) {
+ dprintk("%s kmalloc failed\n", __func__);
+ return NULL;
+ }
+
+ pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
+ if (pages == NULL) {
+ kfree(dev);
+ return NULL;
+ }
+ for (i = 0; i < max_pages; i++) {
+ pages[i] = alloc_page(GFP_NOFS);
+ if (!pages[i])
+ goto out_free;
+ }
+
+ memcpy(&dev->dev_id, d_id, sizeof(*d_id));
+ dev->layout_type = LAYOUT_BLOCK_VOLUME;
+ dev->pages = pages;
+ dev->pgbase = 0;
+ dev->pglen = PAGE_SIZE * max_pages;
+ dev->mincount = 0;
+
+ dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
+ rc = nfs4_proc_getdeviceinfo(server, dev);
+ dprintk("%s getdevice info returns %d\n", __func__, rc);
+ if (rc)
+ goto out_free;
+
+ rv = nfs4_blk_decode_device(server, dev);
+ out_free:
+ for (i = 0; i < max_pages; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+ kfree(dev);
+ return rv;
+}
+
+static int
+bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
+{
+ struct block_mount_id *b_mt_id = NULL;
+ struct pnfs_devicelist *dlist = NULL;
+ struct pnfs_block_dev *bdev;
+ LIST_HEAD(block_disklist);
+ int status = 0, i;
+
+ dprintk("%s enter\n", __func__);
+
+ if (server->pnfs_blksize == 0) {
+ dprintk("%s Server did not return blksize\n", __func__);
+ return -EINVAL;
+ }
+ b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
+ if (!b_mt_id) {
+ status = -ENOMEM;
+ goto out_error;
+ }
+ /* Initialize nfs4 block layout mount id */
+ spin_lock_init(&b_mt_id->bm_lock);
+ INIT_LIST_HEAD(&b_mt_id->bm_devlist);
+
+ dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
+ if (!dlist) {
+ status = -ENOMEM;
+ goto out_error;
+ }
+ dlist->eof = 0;
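+	/* Loop until the server reports eof, fetching GETDEVICEINFO for
+	 * each deviceid returned and adding the decoded device to the
+	 * per-mount device list.
+	 */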
+ while (!dlist->eof) {
+ status = nfs4_proc_getdevicelist(server, fh, dlist);
+ if (status)
+ goto out_error;
+ dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
+ __func__, dlist->num_devs, dlist->eof);
+ for (i = 0; i < dlist->num_devs; i++) {
+ bdev = nfs4_blk_get_deviceinfo(server, fh,
+ &dlist->dev_id[i]);
+ if (!bdev) {
+ status = -ENODEV;
+ goto out_error;
+ }
+ spin_lock(&b_mt_id->bm_lock);
+ list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
+ spin_unlock(&b_mt_id->bm_lock);
+ }
+ }
+ dprintk("%s SUCCESS\n", __func__);
+ server->pnfs_ld_data = b_mt_id;
+
+ out_return:
+ kfree(dlist);
+ return status;
+
+ out_error:
+ free_blk_mountid(b_mt_id);
+ goto out_return;
+}
+
+static int
+bl_clear_layoutdriver(struct nfs_server *server)
+{
+ struct block_mount_id *b_mt_id = server->pnfs_ld_data;
+
+ dprintk("%s enter\n", __func__);
+ free_blk_mountid(b_mt_id);
+ dprintk("%s RETURNS\n", __func__);
+ return 0;
+}
+
+static const struct nfs_pageio_ops bl_pg_read_ops = {
+ .pg_init = pnfs_generic_pg_init_read,
+ .pg_test = pnfs_generic_pg_test,
+ .pg_doio = pnfs_generic_pg_readpages,
+};
+
+static const struct nfs_pageio_ops bl_pg_write_ops = {
+ .pg_init = pnfs_generic_pg_init_write,
+ .pg_test = pnfs_generic_pg_test,
+ .pg_doio = pnfs_generic_pg_writepages,
+};
+
+static struct pnfs_layoutdriver_type blocklayout_type = {
+ .id = LAYOUT_BLOCK_VOLUME,
+ .name = "LAYOUT_BLOCK_VOLUME",
+ .read_pagelist = bl_read_pagelist,
+ .write_pagelist = bl_write_pagelist,
+ .alloc_layout_hdr = bl_alloc_layout_hdr,
+ .free_layout_hdr = bl_free_layout_hdr,
+ .alloc_lseg = bl_alloc_lseg,
+ .free_lseg = bl_free_lseg,
+ .encode_layoutcommit = bl_encode_layoutcommit,
+ .cleanup_layoutcommit = bl_cleanup_layoutcommit,
+ .set_layoutdriver = bl_set_layoutdriver,
+ .clear_layoutdriver = bl_clear_layoutdriver,
+ .pg_read_ops = &bl_pg_read_ops,
+ .pg_write_ops = &bl_pg_write_ops,
+};
+
+static const struct rpc_pipe_ops bl_upcall_ops = {
+ .upcall = bl_pipe_upcall,
+ .downcall = bl_pipe_downcall,
+ .destroy_msg = bl_pipe_destroy_msg,
+};
+
+static int __init nfs4blocklayout_init(void)
+{
+ struct vfsmount *mnt;
+ struct path path;
+ int ret;
+
+ dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);
+
+ ret = pnfs_register_layoutdriver(&blocklayout_type);
+ if (ret)
+ goto out;
+
+ init_waitqueue_head(&bl_wq);
+
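+	/* Create the "blocklayout" pipe under rpc_pipefs so the kernel can
+	 * ask a userspace helper to map pNFS block volumes to local block
+	 * devices.
+	 */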
+ mnt = rpc_get_mount();
+ if (IS_ERR(mnt)) {
+ ret = PTR_ERR(mnt);
+ goto out_remove;
+ }
+
+ ret = vfs_path_lookup(mnt->mnt_root,
+ mnt,
+ NFS_PIPE_DIRNAME, 0, &path);
+ if (ret)
+ goto out_remove;
+
+ bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
+ &bl_upcall_ops, 0);
+ if (IS_ERR(bl_device_pipe)) {
+ ret = PTR_ERR(bl_device_pipe);
+ goto out_remove;
+ }
+out:
+ return ret;
+
+out_remove:
+ pnfs_unregister_layoutdriver(&blocklayout_type);
+ return ret;
+}
+
+static void __exit nfs4blocklayout_exit(void)
+{
+ dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
+ __func__);
+
+ pnfs_unregister_layoutdriver(&blocklayout_type);
+ rpc_unlink(bl_device_pipe);
+}
+
+MODULE_ALIAS("nfs-layouttype4-3");
+
+module_init(nfs4blocklayout_init);
+module_exit(nfs4blocklayout_exit);
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
new file mode 100644
index 00000000000..f27d827960a
--- /dev/null
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -0,0 +1,207 @@
+/*
+ * linux/fs/nfs/blocklayout/blocklayout.h
+ *
+ * Module for the NFSv4.1 pNFS block layout driver.
+ *
+ * Copyright (c) 2006 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@citi.umich.edu>
+ * Fred Isaman <iisaman@umich.edu>
+ *
+ * permission is granted to use, copy, create derivative works and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the university of michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. if
+ * the above copyright notice or any other identification of the
+ * university of michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * this software is provided as is, without representation from the
+ * university of michigan as to its fitness for any purpose, and without
+ * warranty by the university of michigan of any kind, either express
+ * or implied, including without limitation the implied warranties of
+ * merchantability and fitness for a particular purpose. the regents
+ * of the university of michigan shall not be liable for any damages,
+ * including special, indirect, incidental, or consequential damages,
+ * with respect to any claim arising out or in connection with the use
+ * of the software, even if it has been or is hereafter advised of the
+ * possibility of such damages.
+ */
+#ifndef FS_NFS_NFS4BLOCKLAYOUT_H
+#define FS_NFS_NFS4BLOCKLAYOUT_H
+
+#include <linux/device-mapper.h>
+#include <linux/nfs_fs.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
+
+#include "../pnfs.h"
+
+#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
+#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
+
+struct block_mount_id {
+ spinlock_t bm_lock; /* protects list */
+ struct list_head bm_devlist; /* holds pnfs_block_dev */
+};
+
+struct pnfs_block_dev {
+ struct list_head bm_node;
+ struct nfs4_deviceid bm_mdevid; /* associated devid */
+ struct block_device *bm_mdev; /* meta device itself */
+};
+
+enum exstate4 {
+ PNFS_BLOCK_READWRITE_DATA = 0,
+ PNFS_BLOCK_READ_DATA = 1,
+ PNFS_BLOCK_INVALID_DATA = 2, /* mapped, but data is invalid */
+ PNFS_BLOCK_NONE_DATA = 3 /* unmapped, it's a hole */
+};
+
+#define MY_MAX_TAGS (15) /* tag bitnums used must be less than this */
+
+struct my_tree {
+ sector_t mtt_step_size; /* Internal sector alignment */
+ struct list_head mtt_stub; /* Should be a radix tree */
+};
+
+struct pnfs_inval_markings {
+ spinlock_t im_lock;
+ struct my_tree im_tree; /* Sectors that need LAYOUTCOMMIT */
+ sector_t im_block_size; /* Server blocksize in sectors */
+};
+
+struct pnfs_inval_tracking {
+ struct list_head it_link;
+ int it_sector;
+ int it_tags;
+};
+
+/* sector_t fields are all in 512-byte sectors */
+struct pnfs_block_extent {
+ struct kref be_refcnt;
+ struct list_head be_node; /* link into lseg list */
+ struct nfs4_deviceid be_devid; /* FIXME: could use device cache instead */
+ struct block_device *be_mdev;
+ sector_t be_f_offset; /* the starting offset in the file */
+ sector_t be_length; /* the size of the extent */
+ sector_t be_v_offset; /* the starting offset in the volume */
+ enum exstate4 be_state; /* the state of this extent */
+ struct pnfs_inval_markings *be_inval; /* tracks INVAL->RW transition */
+};
+
+/* Shortened extent used by LAYOUTCOMMIT */
+struct pnfs_block_short_extent {
+ struct list_head bse_node;
+ struct nfs4_deviceid bse_devid;
+ struct block_device *bse_mdev;
+ sector_t bse_f_offset; /* the starting offset in the file */
+ sector_t bse_length; /* the size of the extent */
+};
+
+static inline void
+BL_INIT_INVAL_MARKS(struct pnfs_inval_markings *marks, sector_t blocksize)
+{
+ spin_lock_init(&marks->im_lock);
+ INIT_LIST_HEAD(&marks->im_tree.mtt_stub);
+ marks->im_block_size = blocksize;
+ marks->im_tree.mtt_step_size = min((sector_t)PAGE_CACHE_SECTORS,
+ blocksize);
+}
+
+enum extentclass4 {
+	RW_EXTENT	= 0, /* READWRITE and INVAL */
+ RO_EXTENT = 1, /* READ and NONE */
+ EXTENT_LISTS = 2,
+};
+
+static inline int bl_choose_list(enum exstate4 state)
+{
+ if (state == PNFS_BLOCK_READ_DATA || state == PNFS_BLOCK_NONE_DATA)
+ return RO_EXTENT;
+ else
+ return RW_EXTENT;
+}
+
+struct pnfs_block_layout {
+ struct pnfs_layout_hdr bl_layout;
+ struct pnfs_inval_markings bl_inval; /* tracks INVAL->RW transition */
+ spinlock_t bl_ext_lock; /* Protects list manipulation */
+ struct list_head bl_extents[EXTENT_LISTS]; /* R and RW extents */
+ struct list_head bl_commit; /* Needs layout commit */
+ struct list_head bl_committing; /* Layout committing */
+ unsigned int bl_count; /* entries in bl_commit */
+ sector_t bl_blocksize; /* Server blocksize in sectors */
+};
+
+#define BLK_ID(lo) ((struct block_mount_id *)(NFS_SERVER(lo->plh_inode)->pnfs_ld_data))
+
+static inline struct pnfs_block_layout *
+BLK_LO2EXT(struct pnfs_layout_hdr *lo)
+{
+ return container_of(lo, struct pnfs_block_layout, bl_layout);
+}
+
+static inline struct pnfs_block_layout *
+BLK_LSEG2EXT(struct pnfs_layout_segment *lseg)
+{
+ return BLK_LO2EXT(lseg->pls_layout);
+}
+
+struct bl_dev_msg {
+ int status;
+ uint32_t major, minor;
+};
+
+struct bl_msg_hdr {
+ u8 type;
+ u16 totallen; /* length of entire message, including hdr itself */
+};
+
+extern struct dentry *bl_device_pipe;
+extern wait_queue_head_t bl_wq;
+
+#define BL_DEVICE_UMOUNT 0x0 /* Umount--delete devices */
+#define BL_DEVICE_MOUNT 0x1 /* Mount--create devices*/
+#define BL_DEVICE_REQUEST_INIT 0x0 /* Start request */
+#define BL_DEVICE_REQUEST_PROC 0x1 /* User level process succeeds */
+#define BL_DEVICE_REQUEST_ERR 0x2 /* User level process fails */
+
+/* blocklayoutdev.c */
+ssize_t bl_pipe_upcall(struct file *, struct rpc_pipe_msg *,
+ char __user *, size_t);
+ssize_t bl_pipe_downcall(struct file *, const char __user *, size_t);
+void bl_pipe_destroy_msg(struct rpc_pipe_msg *);
+struct block_device *nfs4_blkdev_get(dev_t dev);
+int nfs4_blkdev_put(struct block_device *bdev);
+struct pnfs_block_dev *nfs4_blk_decode_device(struct nfs_server *server,
+ struct pnfs_device *dev);
+int nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo,
+ struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
+
+/* blocklayoutdm.c */
+void bl_free_block_dev(struct pnfs_block_dev *bdev);
+
+/* extents.c */
+struct pnfs_block_extent *
+bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect,
+ struct pnfs_block_extent **cow_read);
+int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
+ sector_t offset, sector_t length,
+ sector_t **pages);
+void bl_put_extent(struct pnfs_block_extent *be);
+struct pnfs_block_extent *bl_alloc_extent(void);
+int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect);
+int encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *arg);
+void clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
+ const struct nfs4_layoutcommit_args *arg,
+ int status);
+int bl_add_merge_extent(struct pnfs_block_layout *bl,
+ struct pnfs_block_extent *new);
+int bl_mark_for_commit(struct pnfs_block_extent *be,
+ sector_t offset, sector_t length);
+
+#endif /* FS_NFS_NFS4BLOCKLAYOUT_H */
diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
new file mode 100644
index 00000000000..a83b393fb01
--- /dev/null
+++ b/fs/nfs/blocklayout/blocklayoutdev.c
@@ -0,0 +1,410 @@
+/*
+ * linux/fs/nfs/blocklayout/blocklayoutdev.c
+ *
+ * Device operations for the pnfs nfs4 block layout driver.
+ *
+ * Copyright (c) 2006 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@citi.umich.edu>
+ * Fred Isaman <iisaman@umich.edu>
+ *
+ * permission is granted to use, copy, create derivative works and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the university of michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. if
+ * the above copyright notice or any other identification of the
+ * university of michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * this software is provided as is, without representation from the
+ * university of michigan as to its fitness for any purpose, and without
+ * warranty by the university of michigan of any kind, either express
+ * or implied, including without limitation the implied warranties of
+ * merchantability and fitness for a particular purpose. the regents
+ * of the university of michigan shall not be liable for any damages,
+ * including special, indirect, incidental, or consequential damages,
+ * with respect to any claim arising out or in connection with the use
+ * of the software, even if it has been or is hereafter advised of the
+ * possibility of such damages.
+ */
+#include <linux/module.h>
+#include <linux/buffer_head.h> /* __bread */
+
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/hash.h>
+
+#include "blocklayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
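+/* Offsets and lengths arrive from the server in bytes; they must be
+ * 512-byte aligned and are stored internally as 512-byte sector counts.
+ */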
+static int decode_sector_number(__be32 **rp, sector_t *sp)
+{
+ uint64_t s;
+
+ *rp = xdr_decode_hyper(*rp, &s);
+ if (s & 0x1ff) {
+ printk(KERN_WARNING "%s: sector not aligned\n", __func__);
+ return -1;
+ }
+ *sp = s >> SECTOR_SHIFT;
+ return 0;
+}
+
+/* Open a block_device by device number. */
+struct block_device *nfs4_blkdev_get(dev_t dev)
+{
+ struct block_device *bd;
+
+ dprintk("%s enter\n", __func__);
+ bd = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+ if (IS_ERR(bd))
+ goto fail;
+ return bd;
+fail:
+ dprintk("%s failed to open device : %ld\n",
+ __func__, PTR_ERR(bd));
+ return NULL;
+}
+
+/*
+ * Release the block device
+ */
+int nfs4_blkdev_put(struct block_device *bdev)
+{
+ dprintk("%s for device %d:%d\n", __func__, MAJOR(bdev->bd_dev),
+ MINOR(bdev->bd_dev));
+ return blkdev_put(bdev, FMODE_READ);
+}
+
+/*
+ * Shouldn't there be a rpc_generic_upcall() to do this for us?
+ */
+ssize_t bl_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+ char __user *dst, size_t buflen)
+{
+ char *data = (char *)msg->data + msg->copied;
+ size_t mlen = min(msg->len - msg->copied, buflen);
+ unsigned long left;
+
+ left = copy_to_user(dst, data, mlen);
+ if (left == mlen) {
+ msg->errno = -EFAULT;
+ return -EFAULT;
+ }
+
+ mlen -= left;
+ msg->copied += mlen;
+ msg->errno = 0;
+ return mlen;
+}
+
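+/* Filled in by bl_pipe_downcall() and read by nfs4_blk_decode_device()
+ * once the waiter on bl_wq has been woken.
+ */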
+static struct bl_dev_msg bl_mount_reply;
+
+ssize_t bl_pipe_downcall(struct file *filp, const char __user *src,
+ size_t mlen)
+{
+ if (mlen != sizeof (struct bl_dev_msg))
+ return -EINVAL;
+
+ if (copy_from_user(&bl_mount_reply, src, mlen) != 0)
+ return -EFAULT;
+
+ wake_up(&bl_wq);
+
+ return mlen;
+}
+
+void bl_pipe_destroy_msg(struct rpc_pipe_msg *msg)
+{
+ if (msg->errno >= 0)
+ return;
+ wake_up(&bl_wq);
+}
+
+/*
+ * Decodes pnfs_block_deviceaddr4 which is XDR encoded in dev->dev_addr_buf.
+ */
+struct pnfs_block_dev *
+nfs4_blk_decode_device(struct nfs_server *server,
+ struct pnfs_device *dev)
+{
+ struct pnfs_block_dev *rv = NULL;
+ struct block_device *bd = NULL;
+ struct rpc_pipe_msg msg;
+ struct bl_msg_hdr bl_msg = {
+ .type = BL_DEVICE_MOUNT,
+ .totallen = dev->mincount,
+ };
+ uint8_t *dataptr;
+ DECLARE_WAITQUEUE(wq, current);
+ struct bl_dev_msg *reply = &bl_mount_reply;
+ int offset, len, i;
+
+ dprintk("%s CREATING PIPEFS MESSAGE\n", __func__);
+ dprintk("%s: deviceid: %s, mincount: %d\n", __func__, dev->dev_id.data,
+ dev->mincount);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.data = kzalloc(sizeof(bl_msg) + dev->mincount, GFP_NOFS);
+ if (!msg.data) {
+ rv = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ memcpy(msg.data, &bl_msg, sizeof(bl_msg));
+ dataptr = (uint8_t *) msg.data;
+ len = dev->mincount;
+ offset = sizeof(bl_msg);
+ for (i = 0; len > 0; i++) {
+ memcpy(&dataptr[offset], page_address(dev->pages[i]),
+ len < PAGE_CACHE_SIZE ? len : PAGE_CACHE_SIZE);
+ len -= PAGE_CACHE_SIZE;
+ offset += PAGE_CACHE_SIZE;
+ }
+ msg.len = sizeof(bl_msg) + dev->mincount;
+
+ dprintk("%s CALLING USERSPACE DAEMON\n", __func__);
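+	/* Queue the upcall and sleep until bl_pipe_downcall() stores the
+	 * daemon's reply in bl_mount_reply and wakes bl_wq.
+	 */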
+ add_wait_queue(&bl_wq, &wq);
+ if (rpc_queue_upcall(bl_device_pipe->d_inode, &msg) < 0) {
+ remove_wait_queue(&bl_wq, &wq);
+ goto out;
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&bl_wq, &wq);
+
+ if (reply->status != BL_DEVICE_REQUEST_PROC) {
+ dprintk("%s failed to open device: %d\n",
+ __func__, reply->status);
+ rv = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ bd = nfs4_blkdev_get(MKDEV(reply->major, reply->minor));
+ if (IS_ERR(bd)) {
+ dprintk("%s failed to open device : %ld\n",
+ __func__, PTR_ERR(bd));
+ goto out;
+ }
+
+ rv = kzalloc(sizeof(*rv), GFP_NOFS);
+ if (!rv) {
+ rv = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ rv->bm_mdev = bd;
+ memcpy(&rv->bm_mdevid, &dev->dev_id, sizeof(struct nfs4_deviceid));
+ dprintk("%s Created device %s with bd_block_size %u\n",
+ __func__,
+ bd->bd_disk->disk_name,
+ bd->bd_block_size);
+
+out:
+ kfree(msg.data);
+ return rv;
+}
+
+/* Map deviceid returned by the server to constructed block_device */
+static struct block_device *translate_devid(struct pnfs_layout_hdr *lo,
+ struct nfs4_deviceid *id)
+{
+ struct block_device *rv = NULL;
+ struct block_mount_id *mid;
+ struct pnfs_block_dev *dev;
+
+ dprintk("%s enter, lo=%p, id=%p\n", __func__, lo, id);
+ mid = BLK_ID(lo);
+ spin_lock(&mid->bm_lock);
+ list_for_each_entry(dev, &mid->bm_devlist, bm_node) {
+ if (memcmp(id->data, dev->bm_mdevid.data,
+ NFS4_DEVICEID4_SIZE) == 0) {
+ rv = dev->bm_mdev;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock(&mid->bm_lock);
+ dprintk("%s returning %p\n", __func__, rv);
+ return rv;
+}
+
+/* Tracks info needed to ensure extents in layout obey constraints of spec */
+struct layout_verification {
+ u32 mode; /* R or RW */
+ u64 start; /* Expected start of next non-COW extent */
+ u64 inval; /* Start of INVAL coverage */
+ u64 cowread; /* End of COW read coverage */
+};
+
+/* Verify the extent meets the layout requirements of the pnfs-block draft,
+ * section 2.3.1.
+ */
+static int verify_extent(struct pnfs_block_extent *be,
+ struct layout_verification *lv)
+{
+ if (lv->mode == IOMODE_READ) {
+ if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
+ be->be_state == PNFS_BLOCK_INVALID_DATA)
+ return -EIO;
+ if (be->be_f_offset != lv->start)
+ return -EIO;
+ lv->start += be->be_length;
+ return 0;
+ }
+ /* lv->mode == IOMODE_RW */
+ if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
+ if (be->be_f_offset != lv->start)
+ return -EIO;
+ if (lv->cowread > lv->start)
+ return -EIO;
+ lv->start += be->be_length;
+ lv->inval = lv->start;
+ return 0;
+ } else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+ if (be->be_f_offset != lv->start)
+ return -EIO;
+ lv->start += be->be_length;
+ return 0;
+ } else if (be->be_state == PNFS_BLOCK_READ_DATA) {
+ if (be->be_f_offset > lv->start)
+ return -EIO;
+ if (be->be_f_offset < lv->inval)
+ return -EIO;
+ if (be->be_f_offset < lv->cowread)
+ return -EIO;
+ /* It looks like you might want to min this with lv->start,
+ * but you really don't.
+ */
+ lv->inval = lv->inval + be->be_length;
+ lv->cowread = be->be_f_offset + be->be_length;
+ return 0;
+ } else
+ return -EIO;
+}
+
+/* XDR decode pnfs_block_layout4 structure */
+int
+nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo,
+ struct nfs4_layoutget_res *lgr, gfp_t gfp_flags)
+{
+ struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
+ int i, status = -EIO;
+ uint32_t count;
+ struct pnfs_block_extent *be = NULL, *save;
+ struct xdr_stream stream;
+ struct xdr_buf buf;
+ struct page *scratch;
+ __be32 *p;
+ struct layout_verification lv = {
+ .mode = lgr->range.iomode,
+ .start = lgr->range.offset >> SECTOR_SHIFT,
+ .inval = lgr->range.offset >> SECTOR_SHIFT,
+ .cowread = lgr->range.offset >> SECTOR_SHIFT,
+ };
+ LIST_HEAD(extents);
+
+ dprintk("---> %s\n", __func__);
+
+ scratch = alloc_page(gfp_flags);
+ if (!scratch)
+ return -ENOMEM;
+
+ xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err;
+
+ count = be32_to_cpup(p++);
+
+ dprintk("%s enter, number of extents %i\n", __func__, count);
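+	/* Each extent on the wire is a deviceid followed by three 64-bit
+	 * byte values (file offset, length, storage offset) and a 32-bit
+	 * state, i.e. 28 bytes beyond the deviceid.
+	 */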
+ p = xdr_inline_decode(&stream, (28 + NFS4_DEVICEID4_SIZE) * count);
+ if (unlikely(!p))
+ goto out_err;
+
+	/* Decode individual extents, putting them in a temporary
+	 * staging area until the whole layout is decoded, to make error
+	 * recovery easier.
+ */
+ for (i = 0; i < count; i++) {
+ be = bl_alloc_extent();
+ if (!be) {
+ status = -ENOMEM;
+ goto out_err;
+ }
+ memcpy(&be->be_devid, p, NFS4_DEVICEID4_SIZE);
+ p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
+ be->be_mdev = translate_devid(lo, &be->be_devid);
+ if (!be->be_mdev)
+ goto out_err;
+
+ /* The next three values are read in as bytes,
+ * but stored as 512-byte sector lengths
+ */
+ if (decode_sector_number(&p, &be->be_f_offset) < 0)
+ goto out_err;
+ if (decode_sector_number(&p, &be->be_length) < 0)
+ goto out_err;
+ if (decode_sector_number(&p, &be->be_v_offset) < 0)
+ goto out_err;
+ be->be_state = be32_to_cpup(p++);
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA)
+ be->be_inval = &bl->bl_inval;
+ if (verify_extent(be, &lv)) {
+ dprintk("%s verify failed\n", __func__);
+ goto out_err;
+ }
+ list_add_tail(&be->be_node, &extents);
+ }
+ if (lgr->range.offset + lgr->range.length !=
+ lv.start << SECTOR_SHIFT) {
+ dprintk("%s Final length mismatch\n", __func__);
+ be = NULL;
+ goto out_err;
+ }
+ if (lv.start < lv.cowread) {
+ dprintk("%s Final uncovered COW extent\n", __func__);
+ be = NULL;
+ goto out_err;
+ }
+	/* Extents decoded properly, now try to merge them into
+	 * the existing layout extents.
+ */
+ spin_lock(&bl->bl_ext_lock);
+ list_for_each_entry_safe(be, save, &extents, be_node) {
+ list_del(&be->be_node);
+ status = bl_add_merge_extent(bl, be);
+ if (status) {
+ spin_unlock(&bl->bl_ext_lock);
+ /* This is a fairly catastrophic error, as the
+ * entire layout extent lists are now corrupted.
+ * We should have some way to distinguish this.
+ */
+ be = NULL;
+ goto out_err;
+ }
+ }
+ spin_unlock(&bl->bl_ext_lock);
+ status = 0;
+ out:
+ __free_page(scratch);
+ dprintk("%s returns %i\n", __func__, status);
+ return status;
+
+ out_err:
+ bl_put_extent(be);
+ while (!list_empty(&extents)) {
+ be = list_first_entry(&extents, struct pnfs_block_extent,
+ be_node);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ }
+ goto out;
+}
diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c
new file mode 100644
index 00000000000..d055c755807
--- /dev/null
+++ b/fs/nfs/blocklayout/blocklayoutdm.c
@@ -0,0 +1,111 @@
+/*
+ * linux/fs/nfs/blocklayout/blocklayoutdm.c
+ *
+ * Module for the NFSv4.1 pNFS block layout driver.
+ *
+ * Copyright (c) 2007 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Fred Isaman <iisaman@umich.edu>
+ * Andy Adamson <andros@citi.umich.edu>
+ *
+ * permission is granted to use, copy, create derivative works and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the university of michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. if
+ * the above copyright notice or any other identification of the
+ * university of michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * this software is provided as is, without representation from the
+ * university of michigan as to its fitness for any purpose, and without
+ * warranty by the university of michigan of any kind, either express
+ * or implied, including without limitation the implied warranties of
+ * merchantability and fitness for a particular purpose. the regents
+ * of the university of michigan shall not be liable for any damages,
+ * including special, indirect, incidental, or consequential damages,
+ * with respect to any claim arising out or in connection with the use
+ * of the software, even if it has been or is hereafter advised of the
+ * possibility of such damages.
+ */
+
+#include <linux/genhd.h> /* gendisk - used in a dprintk*/
+#include <linux/sched.h>
+#include <linux/hash.h>
+
+#include "blocklayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
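+/* Send a BL_DEVICE_UMOUNT upcall for the given device and wait for the
+ * userspace helper to acknowledge before the device is forgotten.
+ */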
+static void dev_remove(dev_t dev)
+{
+ struct rpc_pipe_msg msg;
+ struct bl_dev_msg bl_umount_request;
+ struct bl_msg_hdr bl_msg = {
+ .type = BL_DEVICE_UMOUNT,
+ .totallen = sizeof(bl_umount_request),
+ };
+ uint8_t *dataptr;
+ DECLARE_WAITQUEUE(wq, current);
+
+ dprintk("Entering %s\n", __func__);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS);
+ if (!msg.data)
+ goto out;
+
+ memset(&bl_umount_request, 0, sizeof(bl_umount_request));
+ bl_umount_request.major = MAJOR(dev);
+ bl_umount_request.minor = MINOR(dev);
+
+ memcpy(msg.data, &bl_msg, sizeof(bl_msg));
+ dataptr = (uint8_t *) msg.data;
+ memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request));
+ msg.len = sizeof(bl_msg) + bl_msg.totallen;
+
+ add_wait_queue(&bl_wq, &wq);
+ if (rpc_queue_upcall(bl_device_pipe->d_inode, &msg) < 0) {
+ remove_wait_queue(&bl_wq, &wq);
+ goto out;
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&bl_wq, &wq);
+
+out:
+ kfree(msg.data);
+}
+
+/*
+ * Release meta device
+ */
+static void nfs4_blk_metadev_release(struct pnfs_block_dev *bdev)
+{
+ int rv;
+
+ dprintk("%s Releasing\n", __func__);
+ rv = nfs4_blkdev_put(bdev->bm_mdev);
+ if (rv)
+ printk(KERN_ERR "%s nfs4_blkdev_put returns %d\n",
+ __func__, rv);
+
+ dev_remove(bdev->bm_mdev->bd_dev);
+}
+
+void bl_free_block_dev(struct pnfs_block_dev *bdev)
+{
+ if (bdev) {
+ if (bdev->bm_mdev) {
+ dprintk("%s Removing DM device: %d:%d\n",
+ __func__,
+ MAJOR(bdev->bm_mdev->bd_dev),
+ MINOR(bdev->bm_mdev->bd_dev));
+ nfs4_blk_metadev_release(bdev);
+ }
+ kfree(bdev);
+ }
+}
diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
new file mode 100644
index 00000000000..19fa7b0b8c0
--- /dev/null
+++ b/fs/nfs/blocklayout/extents.c
@@ -0,0 +1,935 @@
+/*
+ * linux/fs/nfs/blocklayout/extents.c
+ *
+ * Module for the NFSv4.1 pNFS block layout driver.
+ *
+ * Copyright (c) 2006 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@citi.umich.edu>
+ * Fred Isaman <iisaman@umich.edu>
+ *
+ * permission is granted to use, copy, create derivative works and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the university of michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. if
+ * the above copyright notice or any other identification of the
+ * university of michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * this software is provided as is, without representation from the
+ * university of michigan as to its fitness for any purpose, and without
+ * warranty by the university of michigan of any kind, either express
+ * or implied, including without limitation the implied warranties of
+ * merchantability and fitness for a particular purpose. the regents
+ * of the university of michigan shall not be liable for any damages,
+ * including special, indirect, incidental, or consequential damages,
+ * with respect to any claim arising out or in connection with the use
+ * of the software, even if it has been or is hereafter advised of the
+ * possibility of such damages.
+ */
+
+#include "blocklayout.h"
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+/* Bit numbers */
+#define EXTENT_INITIALIZED 0
+#define EXTENT_WRITTEN 1
+#define EXTENT_IN_COMMIT 2
+#define INTERNAL_EXISTS MY_MAX_TAGS
+#define INTERNAL_MASK ((1 << INTERNAL_EXISTS) - 1)
+
+/* Returns largest t<=s s.t. t%base==0 */
+static inline sector_t normalize(sector_t s, int base)
+{
+ sector_t tmp = s; /* Since do_div modifies its argument */
+ return s - do_div(tmp, base);
+}
+
+static inline sector_t normalize_up(sector_t s, int base)
+{
+ return normalize(s + base - 1, base);
+}
+
+/* Complete stub using a list while we determine the API wanted */
+
+/* Returns tags, or negative */
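+/* The stub list is kept sorted by sector, ascending from head to tail
+ * (see _add_entry), so a reverse walk can stop at the first entry below s.
+ */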
+static int32_t _find_entry(struct my_tree *tree, u64 s)
+{
+ struct pnfs_inval_tracking *pos;
+
+ dprintk("%s(%llu) enter\n", __func__, s);
+ list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) {
+ if (pos->it_sector > s)
+ continue;
+ else if (pos->it_sector == s)
+ return pos->it_tags & INTERNAL_MASK;
+ else
+ break;
+ }
+ return -ENOENT;
+}
+
+static inline
+int _has_tag(struct my_tree *tree, u64 s, int32_t tag)
+{
+ int32_t tags;
+
+ dprintk("%s(%llu, %i) enter\n", __func__, s, tag);
+ s = normalize(s, tree->mtt_step_size);
+ tags = _find_entry(tree, s);
+ if ((tags < 0) || !(tags & (1 << tag)))
+ return 0;
+ else
+ return 1;
+}
+
+/* Creates entry with tag, or if entry already exists, unions tag to it.
+ * If storage is not NULL, newly created entry will use it.
+ * Returns number of entries added, or negative on error.
+ */
+static int _add_entry(struct my_tree *tree, u64 s, int32_t tag,
+ struct pnfs_inval_tracking *storage)
+{
+ int found = 0;
+ struct pnfs_inval_tracking *pos;
+
+ dprintk("%s(%llu, %i, %p) enter\n", __func__, s, tag, storage);
+ list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) {
+ if (pos->it_sector > s)
+ continue;
+ else if (pos->it_sector == s) {
+ found = 1;
+ break;
+ } else
+ break;
+ }
+ if (found) {
+ pos->it_tags |= (1 << tag);
+ return 0;
+ } else {
+ struct pnfs_inval_tracking *new;
+ if (storage)
+ new = storage;
+ else {
+ new = kmalloc(sizeof(*new), GFP_NOFS);
+ if (!new)
+ return -ENOMEM;
+ }
+ new->it_sector = s;
+ new->it_tags = (1 << tag);
+ list_add(&new->it_link, &pos->it_link);
+ return 1;
+ }
+}
+
+/* XXXX Really want option to not create */
+/* Over range, unions tag with existing entries, else creates entry with tag */
+static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length)
+{
+ u64 i;
+
+ dprintk("%s(%i, %llu, %llu) enter\n", __func__, tag, s, length);
+ for (i = normalize(s, tree->mtt_step_size); i < s + length;
+ i += tree->mtt_step_size)
+ if (_add_entry(tree, i, tag, NULL))
+ return -ENOMEM;
+ return 0;
+}
+
+/* Ensure that future operations on given range of tree will not malloc */
+static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
+{
+ u64 start, end, s;
+ int count, i, used = 0, status = -ENOMEM;
+ struct pnfs_inval_tracking **storage;
+
+ dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
+ start = normalize(offset, tree->mtt_step_size);
+ end = normalize_up(offset + length, tree->mtt_step_size);
+ count = (int)(end - start) / (int)tree->mtt_step_size;
+
+ /* Pre-malloc what memory we might need */
+ storage = kmalloc(sizeof(*storage) * count, GFP_NOFS);
+ if (!storage)
+ return -ENOMEM;
+ for (i = 0; i < count; i++) {
+ storage[i] = kmalloc(sizeof(struct pnfs_inval_tracking),
+ GFP_NOFS);
+ if (!storage[i])
+ goto out_cleanup;
+ }
+
+ /* Now need lock - HOW??? */
+
+ for (s = start; s < end; s += tree->mtt_step_size)
+ used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
+
+ /* Unlock - HOW??? */
+ status = 0;
+
+ out_cleanup:
+ for (i = used; i < count; i++) {
+ if (!storage[i])
+ break;
+ kfree(storage[i]);
+ }
+ kfree(storage);
+ return status;
+}
+
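+/* @array is a sorted, ~0-terminated list of sectors; insert @offset while
+ * keeping the order, doing nothing if it is already present.
+ */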
+static void set_needs_init(sector_t *array, sector_t offset)
+{
+ sector_t *p = array;
+
+ dprintk("%s enter\n", __func__);
+ if (!p)
+ return;
+ while (*p < offset)
+ p++;
+ if (*p == offset)
+ return;
+ else if (*p == ~0) {
+ *p++ = offset;
+ *p = ~0;
+ return;
+ } else {
+ sector_t *save = p;
+ dprintk("%s Adding %llu\n", __func__, (u64)offset);
+ while (*p != ~0)
+ p++;
+ p++;
+ memmove(save + 1, save, (char *)p - (char *)save);
+ *save = offset;
+ return;
+ }
+}
+
+/* We are relying on page lock to serialize this */
+int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect)
+{
+ int rv;
+
+ spin_lock(&marks->im_lock);
+ rv = _has_tag(&marks->im_tree, isect, EXTENT_INITIALIZED);
+ spin_unlock(&marks->im_lock);
+ return rv;
+}
+
+/* Assume start, end already sector aligned */
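+/* Return true iff every mtt_step_size slot in [start, end) carries @tag. */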
+static int
+_range_has_tag(struct my_tree *tree, u64 start, u64 end, int32_t tag)
+{
+ struct pnfs_inval_tracking *pos;
+ u64 expect = 0;
+
+ dprintk("%s(%llu, %llu, %i) enter\n", __func__, start, end, tag);
+ list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) {
+ if (pos->it_sector >= end)
+ continue;
+ if (!expect) {
+ if ((pos->it_sector == end - tree->mtt_step_size) &&
+ (pos->it_tags & (1 << tag))) {
+ expect = pos->it_sector - tree->mtt_step_size;
+ if (pos->it_sector < tree->mtt_step_size || expect < start)
+ return 1;
+ continue;
+ } else {
+ return 0;
+ }
+ }
+ if (pos->it_sector != expect || !(pos->it_tags & (1 << tag)))
+ return 0;
+ expect -= tree->mtt_step_size;
+ if (expect < start)
+ return 1;
+ }
+ return 0;
+}
+
+static int is_range_written(struct pnfs_inval_markings *marks,
+ sector_t start, sector_t end)
+{
+ int rv;
+
+ spin_lock(&marks->im_lock);
+ rv = _range_has_tag(&marks->im_tree, start, end, EXTENT_WRITTEN);
+ spin_unlock(&marks->im_lock);
+ return rv;
+}
+
+/* Marks sectors in [offset, offset + length) as having been initialized.
+ * All lengths are step-aligned, where step is min(pagesize, blocksize).
+ * Notes where partial block is initialized, and helps prepare it for
+ * complete initialization later.
+ */
+/* Currently assumes offset is page-aligned */
+int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
+ sector_t offset, sector_t length,
+ sector_t **pages)
+{
+ sector_t s, start, end;
+ sector_t *array = NULL; /* Pages to mark */
+
+ dprintk("%s(offset=%llu,len=%llu) enter\n",
+ __func__, (u64)offset, (u64)length);
+ s = max((sector_t) 3,
+ 2 * (marks->im_block_size / (PAGE_CACHE_SECTORS)));
+ dprintk("%s set max=%llu\n", __func__, (u64)s);
+ if (pages) {
+ array = kmalloc(s * sizeof(sector_t), GFP_NOFS);
+ if (!array)
+ goto outerr;
+ array[0] = ~0;
+ }
+
+ start = normalize(offset, marks->im_block_size);
+ end = normalize_up(offset + length, marks->im_block_size);
+ if (_preload_range(&marks->im_tree, start, end - start))
+ goto outerr;
+
+ spin_lock(&marks->im_lock);
+
+ for (s = normalize_up(start, PAGE_CACHE_SECTORS);
+ s < offset; s += PAGE_CACHE_SECTORS) {
+ dprintk("%s pre-area pages\n", __func__);
+ /* Portion of used block is not initialized */
+ if (!_has_tag(&marks->im_tree, s, EXTENT_INITIALIZED))
+ set_needs_init(array, s);
+ }
+ if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length))
+ goto out_unlock;
+ for (s = normalize_up(offset + length, PAGE_CACHE_SECTORS);
+ s < end; s += PAGE_CACHE_SECTORS) {
+ dprintk("%s post-area pages\n", __func__);
+ if (!_has_tag(&marks->im_tree, s, EXTENT_INITIALIZED))
+ set_needs_init(array, s);
+ }
+
+ spin_unlock(&marks->im_lock);
+
+ if (pages) {
+ if (array[0] == ~0) {
+ kfree(array);
+ *pages = NULL;
+ } else
+ *pages = array;
+ }
+ return 0;
+
+ out_unlock:
+ spin_unlock(&marks->im_lock);
+ outerr:
+ if (pages) {
+ kfree(array);
+ *pages = NULL;
+ }
+ return -ENOMEM;
+}
+
+/* Marks sectors in [offset, offset + length) as having been written to disk.
+ * All lengths should be block aligned.
+ */
+static int mark_written_sectors(struct pnfs_inval_markings *marks,
+ sector_t offset, sector_t length)
+{
+ int status;
+
+ dprintk("%s(offset=%llu,len=%llu) enter\n", __func__,
+ (u64)offset, (u64)length);
+ spin_lock(&marks->im_lock);
+ status = _set_range(&marks->im_tree, EXTENT_WRITTEN, offset, length);
+ spin_unlock(&marks->im_lock);
+ return status;
+}
+
+static void print_short_extent(struct pnfs_block_short_extent *be)
+{
+ dprintk("PRINT SHORT EXTENT extent %p\n", be);
+ if (be) {
+ dprintk(" be_f_offset %llu\n", (u64)be->bse_f_offset);
+ dprintk(" be_length %llu\n", (u64)be->bse_length);
+ }
+}
+
+static void print_clist(struct list_head *list, unsigned int count)
+{
+ struct pnfs_block_short_extent *be;
+ unsigned int i = 0;
+
+ ifdebug(FACILITY) {
+ printk(KERN_DEBUG "****************\n");
+ printk(KERN_DEBUG "Extent list looks like:\n");
+ list_for_each_entry(be, list, bse_node) {
+ i++;
+ print_short_extent(be);
+ }
+ if (i != count)
+ printk(KERN_DEBUG "\n\nExpected %u entries\n\n\n", count);
+ printk(KERN_DEBUG "****************\n");
+ }
+}
+
+/* Note: In theory, we should do more checking that devids match between
+ * old and new, but if they don't, the lists are too corrupt to salvage anyway.
+ */
+/* Note this is very similar to bl_add_merge_extent */
+static void add_to_commitlist(struct pnfs_block_layout *bl,
+ struct pnfs_block_short_extent *new)
+{
+ struct list_head *clist = &bl->bl_commit;
+ struct pnfs_block_short_extent *old, *save;
+ sector_t end = new->bse_f_offset + new->bse_length;
+
+ dprintk("%s enter\n", __func__);
+ print_short_extent(new);
+ print_clist(clist, bl->bl_count);
+ bl->bl_count++;
+ /* Scan for proper place to insert, extending new to the left
+ * as much as possible.
+ */
+ list_for_each_entry_safe(old, save, clist, bse_node) {
+ if (new->bse_f_offset < old->bse_f_offset)
+ break;
+ if (end <= old->bse_f_offset + old->bse_length) {
+ /* Range is already in list */
+ bl->bl_count--;
+ kfree(new);
+ return;
+ } else if (new->bse_f_offset <=
+ old->bse_f_offset + old->bse_length) {
+ /* new overlaps or abuts existing be */
+ if (new->bse_mdev == old->bse_mdev) {
+ /* extend new to fully replace old */
+ new->bse_length += new->bse_f_offset -
+ old->bse_f_offset;
+ new->bse_f_offset = old->bse_f_offset;
+ list_del(&old->bse_node);
+ bl->bl_count--;
+ kfree(old);
+ }
+ }
+ }
+ /* Note that if we never hit the above break, old will not point to a
+ * valid extent. However, in that case &old->bse_node==list.
+ */
+ list_add_tail(&new->bse_node, &old->bse_node);
+ /* Scan forward for overlaps. If we find any, extend new and
+ * remove the overlapped extent.
+ */
+ old = list_prepare_entry(new, clist, bse_node);
+ list_for_each_entry_safe_continue(old, save, clist, bse_node) {
+ if (end < old->bse_f_offset)
+ break;
+ /* new overlaps or abuts old */
+ if (new->bse_mdev == old->bse_mdev) {
+ if (end < old->bse_f_offset + old->bse_length) {
+ /* extend new to fully cover old */
+ end = old->bse_f_offset + old->bse_length;
+ new->bse_length = end - new->bse_f_offset;
+ }
+ list_del(&old->bse_node);
+ bl->bl_count--;
+ kfree(old);
+ }
+ }
+ dprintk("%s: after merging\n", __func__);
+ print_clist(clist, bl->bl_count);
+}
+
+/* Note the range described by offset, length is guaranteed to be contained
+ * within be.
+ */
+int bl_mark_for_commit(struct pnfs_block_extent *be,
+ sector_t offset, sector_t length)
+{
+ sector_t new_end, end = offset + length;
+ struct pnfs_block_short_extent *new;
+ struct pnfs_block_layout *bl = container_of(be->be_inval,
+ struct pnfs_block_layout,
+ bl_inval);
+
+ new = kmalloc(sizeof(*new), GFP_NOFS);
+ if (!new)
+ return -ENOMEM;
+
+ mark_written_sectors(be->be_inval, offset, length);
+ /* We want to add the range to commit list, but it must be
+ * block-normalized, and verified that the normalized range has
+ * been entirely written to disk.
+ */
+ new->bse_f_offset = offset;
+ offset = normalize(offset, bl->bl_blocksize);
+ if (offset < new->bse_f_offset) {
+ if (is_range_written(be->be_inval, offset, new->bse_f_offset))
+ new->bse_f_offset = offset;
+ else
+ new->bse_f_offset = offset + bl->bl_blocksize;
+ }
+ new_end = normalize_up(end, bl->bl_blocksize);
+ if (end < new_end) {
+ if (is_range_written(be->be_inval, end, new_end))
+ end = new_end;
+ else
+ end = new_end - bl->bl_blocksize;
+ }
+ if (end <= new->bse_f_offset) {
+ kfree(new);
+ return 0;
+ }
+ new->bse_length = end - new->bse_f_offset;
+ new->bse_devid = be->be_devid;
+ new->bse_mdev = be->be_mdev;
+
+ spin_lock(&bl->bl_ext_lock);
+ /* new will be freed, either by add_to_commitlist if it decides not
+ * to use it, or after LAYOUTCOMMIT uses it in the commitlist.
+ */
+ add_to_commitlist(bl, new);
+ spin_unlock(&bl->bl_ext_lock);
+ return 0;
+}
+
+static void print_bl_extent(struct pnfs_block_extent *be)
+{
+ dprintk("PRINT EXTENT extent %p\n", be);
+ if (be) {
+ dprintk(" be_f_offset %llu\n", (u64)be->be_f_offset);
+ dprintk(" be_length %llu\n", (u64)be->be_length);
+ dprintk(" be_v_offset %llu\n", (u64)be->be_v_offset);
+ dprintk(" be_state %d\n", be->be_state);
+ }
+}
+
+static void
+destroy_extent(struct kref *kref)
+{
+ struct pnfs_block_extent *be;
+
+ be = container_of(kref, struct pnfs_block_extent, be_refcnt);
+ dprintk("%s be=%p\n", __func__, be);
+ kfree(be);
+}
+
+void
+bl_put_extent(struct pnfs_block_extent *be)
+{
+ if (be) {
+ dprintk("%s enter %p (%i)\n", __func__, be,
+ atomic_read(&be->be_refcnt.refcount));
+ kref_put(&be->be_refcnt, destroy_extent);
+ }
+}
+
+struct pnfs_block_extent *bl_alloc_extent(void)
+{
+ struct pnfs_block_extent *be;
+
+ be = kmalloc(sizeof(struct pnfs_block_extent), GFP_NOFS);
+ if (!be)
+ return NULL;
+ INIT_LIST_HEAD(&be->be_node);
+ kref_init(&be->be_refcnt);
+ be->be_inval = NULL;
+ return be;
+}
+
+static void print_elist(struct list_head *list)
+{
+ struct pnfs_block_extent *be;
+ dprintk("****************\n");
+ dprintk("Extent list looks like:\n");
+ list_for_each_entry(be, list, be_node) {
+ print_bl_extent(be);
+ }
+ dprintk("****************\n");
+}
+
+static inline int
+extents_consistent(struct pnfs_block_extent *old, struct pnfs_block_extent *new)
+{
+ /* Note this assumes new->be_f_offset >= old->be_f_offset */
+ return (new->be_state == old->be_state) &&
+ ((new->be_state == PNFS_BLOCK_NONE_DATA) ||
+ ((new->be_v_offset - old->be_v_offset ==
+ new->be_f_offset - old->be_f_offset) &&
+ new->be_mdev == old->be_mdev));
+}
+
+/* Adds new to appropriate list in bl, modifying new and removing existing
+ * extents as appropriate to deal with overlaps.
+ *
+ * See bl_find_get_extent for list constraints.
+ *
+ * Refcount on new is already set.  If we end up not using it, or we error
+ * out, we need to put the reference.
+ *
+ * bl->bl_ext_lock is held by caller.
+ */
+int
+bl_add_merge_extent(struct pnfs_block_layout *bl,
+ struct pnfs_block_extent *new)
+{
+ struct pnfs_block_extent *be, *tmp;
+ sector_t end = new->be_f_offset + new->be_length;
+ struct list_head *list;
+
+ dprintk("%s enter with be=%p\n", __func__, new);
+ print_bl_extent(new);
+ list = &bl->bl_extents[bl_choose_list(new->be_state)];
+ print_elist(list);
+
+ /* Scan for proper place to insert, extending new to the left
+ * as much as possible.
+ */
+ list_for_each_entry_safe_reverse(be, tmp, list, be_node) {
+ if (new->be_f_offset >= be->be_f_offset + be->be_length)
+ break;
+ if (new->be_f_offset >= be->be_f_offset) {
+ if (end <= be->be_f_offset + be->be_length) {
+				/* new is a subset of existing be */
+ if (extents_consistent(be, new)) {
+ dprintk("%s: new is subset, ignoring\n",
+ __func__);
+ bl_put_extent(new);
+ return 0;
+ } else {
+ goto out_err;
+ }
+ } else {
+ /* |<-- be -->|
+ * |<-- new -->| */
+ if (extents_consistent(be, new)) {
+ /* extend new to fully replace be */
+ new->be_length += new->be_f_offset -
+ be->be_f_offset;
+ new->be_f_offset = be->be_f_offset;
+ new->be_v_offset = be->be_v_offset;
+ dprintk("%s: removing %p\n", __func__, be);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ } else {
+ goto out_err;
+ }
+ }
+ } else if (end >= be->be_f_offset + be->be_length) {
+			/* new extent overlaps existing be */
+ if (extents_consistent(be, new)) {
+ /* extend new to fully replace be */
+ dprintk("%s: removing %p\n", __func__, be);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ } else {
+ goto out_err;
+ }
+ } else if (end > be->be_f_offset) {
+ /* |<-- be -->|
+ *|<-- new -->| */
+ if (extents_consistent(new, be)) {
+ /* extend new to fully replace be */
+ new->be_length += be->be_f_offset + be->be_length -
+ new->be_f_offset - new->be_length;
+ dprintk("%s: removing %p\n", __func__, be);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ } else {
+ goto out_err;
+ }
+ }
+ }
+ /* Note that if we never hit the above break, be will not point to a
+ * valid extent. However, in that case &be->be_node==list.
+ */
+ list_add(&new->be_node, &be->be_node);
+ dprintk("%s: inserting new\n", __func__);
+ print_elist(list);
+ /* FIXME - The per-list consistency checks have all been done,
+ * should now check cross-list consistency.
+ */
+ return 0;
+
+ out_err:
+ bl_put_extent(new);
+ return -EIO;
+}
+
+/* Returns extent, or NULL. If a second READ extent exists, it is returned
+ * in cow_read, if given.
+ *
+ * The extents are kept in two separate ordered lists, one for READ and NONE,
+ * one for READWRITE and INVALID.  Within each list, we assume:
+ * 1. Extents are ordered by file offset.
+ * 2. For any given isect, there is at most one extent that matches.
+ */
+struct pnfs_block_extent *
+bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect,
+ struct pnfs_block_extent **cow_read)
+{
+ struct pnfs_block_extent *be, *cow, *ret;
+ int i;
+
+ dprintk("%s enter with isect %llu\n", __func__, (u64)isect);
+ cow = ret = NULL;
+ spin_lock(&bl->bl_ext_lock);
+ for (i = 0; i < EXTENT_LISTS; i++) {
+ list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) {
+ if (isect >= be->be_f_offset + be->be_length)
+ break;
+ if (isect >= be->be_f_offset) {
+ /* We have found an extent */
+ dprintk("%s Get %p (%i)\n", __func__, be,
+ atomic_read(&be->be_refcnt.refcount));
+ kref_get(&be->be_refcnt);
+ if (!ret)
+ ret = be;
+ else if (be->be_state != PNFS_BLOCK_READ_DATA)
+ bl_put_extent(be);
+ else
+ cow = be;
+ break;
+ }
+ }
+ if (ret &&
+ (!cow_read || ret->be_state != PNFS_BLOCK_INVALID_DATA))
+ break;
+ }
+ spin_unlock(&bl->bl_ext_lock);
+ if (cow_read)
+ *cow_read = cow;
+ print_bl_extent(ret);
+ return ret;
+}
+
+/* Similar to bl_find_get_extent, but called with lock held, and ignores cow */
+static struct pnfs_block_extent *
+bl_find_get_extent_locked(struct pnfs_block_layout *bl, sector_t isect)
+{
+ struct pnfs_block_extent *be, *ret = NULL;
+ int i;
+
+ dprintk("%s enter with isect %llu\n", __func__, (u64)isect);
+ for (i = 0; i < EXTENT_LISTS; i++) {
+ if (ret)
+ break;
+ list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) {
+ if (isect >= be->be_f_offset + be->be_length)
+ break;
+ if (isect >= be->be_f_offset) {
+ /* We have found an extent */
+ dprintk("%s Get %p (%i)\n", __func__, be,
+ atomic_read(&be->be_refcnt.refcount));
+ kref_get(&be->be_refcnt);
+ ret = be;
+ break;
+ }
+ }
+ }
+ print_bl_extent(ret);
+ return ret;
+}
+
+int
+encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *arg)
+{
+ struct pnfs_block_short_extent *lce, *save;
+ unsigned int count = 0;
+ __be32 *p, *xdr_start;
+
+ dprintk("%s enter\n", __func__);
+ /* BUG - creation of bl_commit is buggy - need to wait for
+ * entire block to be marked WRITTEN before it can be added.
+ */
+ spin_lock(&bl->bl_ext_lock);
+ /* Want to adjust for possible truncate */
+ /* We now want to adjust argument range */
+
+ /* XDR encode the ranges found */
+ xdr_start = xdr_reserve_space(xdr, 8);
+ if (!xdr_start)
+ goto out;
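+	/* Each committed range is encoded as deviceid, file offset and
+	 * length in bytes, a zero storage offset, and state READWRITE_DATA.
+	 */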
+ list_for_each_entry_safe(lce, save, &bl->bl_commit, bse_node) {
+ p = xdr_reserve_space(xdr, 7 * 4 + sizeof(lce->bse_devid.data));
+ if (!p)
+ break;
+ p = xdr_encode_opaque_fixed(p, lce->bse_devid.data, NFS4_DEVICEID4_SIZE);
+ p = xdr_encode_hyper(p, lce->bse_f_offset << SECTOR_SHIFT);
+ p = xdr_encode_hyper(p, lce->bse_length << SECTOR_SHIFT);
+ p = xdr_encode_hyper(p, 0LL);
+ *p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA);
+ list_del(&lce->bse_node);
+ list_add_tail(&lce->bse_node, &bl->bl_committing);
+ bl->bl_count--;
+ count++;
+ }
+ xdr_start[0] = cpu_to_be32((xdr->p - xdr_start - 1) * 4);
+ xdr_start[1] = cpu_to_be32(count);
+out:
+ spin_unlock(&bl->bl_ext_lock);
+ dprintk("%s found %i ranges\n", __func__, count);
+ return 0;
+}
+
+/* Helper function for set_to_rw that initializes a new extent */
+static void
+_prep_new_extent(struct pnfs_block_extent *new,
+ struct pnfs_block_extent *orig,
+ sector_t offset, sector_t length, int state)
+{
+ kref_init(&new->be_refcnt);
+ /* don't need to INIT_LIST_HEAD(&new->be_node) */
+ memcpy(&new->be_devid, &orig->be_devid, sizeof(struct nfs4_deviceid));
+ new->be_mdev = orig->be_mdev;
+ new->be_f_offset = offset;
+ new->be_length = length;
+ new->be_v_offset = orig->be_v_offset - orig->be_f_offset + offset;
+ new->be_state = state;
+ new->be_inval = orig->be_inval;
+}
+
+/* Tries to merge be with extent in front of it in list.
+ * Frees storage if not used.
+ */
+static struct pnfs_block_extent *
+_front_merge(struct pnfs_block_extent *be, struct list_head *head,
+ struct pnfs_block_extent *storage)
+{
+ struct pnfs_block_extent *prev;
+
+ if (!storage)
+ goto no_merge;
+ if (&be->be_node == head || be->be_node.prev == head)
+ goto no_merge;
+ prev = list_entry(be->be_node.prev, struct pnfs_block_extent, be_node);
+ if ((prev->be_f_offset + prev->be_length != be->be_f_offset) ||
+ !extents_consistent(prev, be))
+ goto no_merge;
+ _prep_new_extent(storage, prev, prev->be_f_offset,
+ prev->be_length + be->be_length, prev->be_state);
+ list_replace(&prev->be_node, &storage->be_node);
+ bl_put_extent(prev);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ return storage;
+
+ no_merge:
+ kfree(storage);
+ return be;
+}
+
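+/* Convert the INVALID extent covering @offset to READWRITE over at most
+ * [offset, offset + length), splitting it into up to three pieces and
+ * merging with compatible neighbours.  Returns the offset from which the
+ * caller should continue.
+ */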
+static u64
+set_to_rw(struct pnfs_block_layout *bl, u64 offset, u64 length)
+{
+ u64 rv = offset + length;
+ struct pnfs_block_extent *be, *e1, *e2, *e3, *new, *old;
+ struct pnfs_block_extent *children[3];
+ struct pnfs_block_extent *merge1 = NULL, *merge2 = NULL;
+ int i = 0, j;
+
+ dprintk("%s(%llu, %llu)\n", __func__, offset, length);
+ /* Create storage for up to three new extents e1, e2, e3 */
+ e1 = kmalloc(sizeof(*e1), GFP_ATOMIC);
+ e2 = kmalloc(sizeof(*e2), GFP_ATOMIC);
+ e3 = kmalloc(sizeof(*e3), GFP_ATOMIC);
+ /* BUG - we are ignoring any failure */
+ if (!e1 || !e2 || !e3)
+ goto out_nosplit;
+
+ spin_lock(&bl->bl_ext_lock);
+ be = bl_find_get_extent_locked(bl, offset);
+ rv = be->be_f_offset + be->be_length;
+ if (be->be_state != PNFS_BLOCK_INVALID_DATA) {
+ spin_unlock(&bl->bl_ext_lock);
+ goto out_nosplit;
+ }
+ /* Add e* to children, bumping e*'s krefs */
+ if (be->be_f_offset != offset) {
+ _prep_new_extent(e1, be, be->be_f_offset,
+ offset - be->be_f_offset,
+ PNFS_BLOCK_INVALID_DATA);
+ children[i++] = e1;
+ print_bl_extent(e1);
+ } else
+ merge1 = e1;
+ _prep_new_extent(e2, be, offset,
+ min(length, be->be_f_offset + be->be_length - offset),
+ PNFS_BLOCK_READWRITE_DATA);
+ children[i++] = e2;
+ print_bl_extent(e2);
+ if (offset + length < be->be_f_offset + be->be_length) {
+ _prep_new_extent(e3, be, e2->be_f_offset + e2->be_length,
+ be->be_f_offset + be->be_length -
+ offset - length,
+ PNFS_BLOCK_INVALID_DATA);
+ children[i++] = e3;
+ print_bl_extent(e3);
+ } else
+ merge2 = e3;
+
+ /* Remove be from list, and insert the e* */
+ /* We don't get refs on e*, since this list is the base reference
+ * set when init'ed.
+ */
+ if (i < 3)
+ children[i] = NULL;
+ new = children[0];
+ list_replace(&be->be_node, &new->be_node);
+ bl_put_extent(be);
+ new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge1);
+ for (j = 1; j < i; j++) {
+ old = new;
+ new = children[j];
+ list_add(&new->be_node, &old->be_node);
+ }
+ if (merge2) {
+ /* This is a HACK, should just create a _back_merge function */
+ new = list_entry(new->be_node.next,
+ struct pnfs_block_extent, be_node);
+ new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge2);
+ }
+ spin_unlock(&bl->bl_ext_lock);
+
+ /* Since we removed the base reference above, be is now scheduled for
+ * destruction.
+ */
+ bl_put_extent(be);
+ dprintk("%s returns %llu after split\n", __func__, rv);
+ return rv;
+
+ out_nosplit:
+ kfree(e1);
+ kfree(e2);
+ kfree(e3);
+ dprintk("%s returns %llu without splitting\n", __func__, rv);
+ return rv;
+}
+
+void
+clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
+ const struct nfs4_layoutcommit_args *arg,
+ int status)
+{
+ struct pnfs_block_short_extent *lce, *save;
+
+ dprintk("%s status %d\n", __func__, status);
+ list_for_each_entry_safe(lce, save, &bl->bl_committing, bse_node) {
+ if (likely(!status)) {
+ u64 offset = lce->bse_f_offset;
+ u64 end = offset + lce->bse_length;
+
+ do {
+ offset = set_to_rw(bl, offset, end - offset);
+ } while (offset < end);
+ list_del(&lce->bse_node);
+
+ kfree(lce);
+ } else {
+ list_del(&lce->bse_node);
+ spin_lock(&bl->bl_ext_lock);
+ add_to_commitlist(bl, lce);
+ spin_unlock(&bl->bl_ext_lock);
+ }
+ }
+}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 19ea7d9c75e..5833fbbf59b 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -105,7 +105,7 @@ struct rpc_program nfs_program = {
.nrvers = ARRAY_SIZE(nfs_version),
.version = nfs_version,
.stats = &nfs_rpcstat,
- .pipe_dir_name = "/nfs",
+ .pipe_dir_name = NFS_PIPE_DIRNAME,
};
struct rpc_stat nfs_rpcstat = {
@@ -904,7 +904,9 @@ error:
/*
* Load up the server record from information gained in an fsinfo record
*/
-static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *fsinfo)
+static void nfs_server_set_fsinfo(struct nfs_server *server,
+ struct nfs_fh *mntfh,
+ struct nfs_fsinfo *fsinfo)
{
unsigned long max_rpc_payload;
@@ -934,7 +936,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *
if (server->wsize > NFS_MAX_FILE_IO_SIZE)
server->wsize = NFS_MAX_FILE_IO_SIZE;
server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- set_pnfs_layoutdriver(server, fsinfo->layouttype);
+ server->pnfs_blksize = fsinfo->blksize;
+ set_pnfs_layoutdriver(server, mntfh, fsinfo->layouttype);
server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
@@ -980,7 +983,7 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str
if (error < 0)
goto out_error;
- nfs_server_set_fsinfo(server, &fsinfo);
+ nfs_server_set_fsinfo(server, mntfh, &fsinfo);
/* Get some general file system info */
if (server->namelen == 0) {
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 57f578e2560..b238d95ac48 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -134,18 +134,19 @@ const struct inode_operations nfs4_dir_inode_operations = {
#endif /* CONFIG_NFS_V4 */
-static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct rpc_cred *cred)
+static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir, struct rpc_cred *cred)
{
struct nfs_open_dir_context *ctx;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx != NULL) {
ctx->duped = 0;
+ ctx->attr_gencount = NFS_I(dir)->attr_gencount;
ctx->dir_cookie = 0;
ctx->dup_cookie = 0;
ctx->cred = get_rpccred(cred);
- } else
- ctx = ERR_PTR(-ENOMEM);
- return ctx;
+ return ctx;
+ }
+ return ERR_PTR(-ENOMEM);
}
static void put_nfs_open_dir_context(struct nfs_open_dir_context *ctx)
@@ -173,7 +174,7 @@ nfs_opendir(struct inode *inode, struct file *filp)
cred = rpc_lookup_cred();
if (IS_ERR(cred))
return PTR_ERR(cred);
- ctx = alloc_nfs_open_dir_context(cred);
+ ctx = alloc_nfs_open_dir_context(inode, cred);
if (IS_ERR(ctx)) {
res = PTR_ERR(ctx);
goto out;
@@ -323,7 +324,6 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri
{
loff_t diff = desc->file->f_pos - desc->current_index;
unsigned int index;
- struct nfs_open_dir_context *ctx = desc->file->private_data;
if (diff < 0)
goto out_eof;
@@ -336,7 +336,6 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri
index = (unsigned int)diff;
*desc->dir_cookie = array->array[index].cookie;
desc->cache_entry_index = index;
- ctx->duped = 0;
return 0;
out_eof:
desc->eof = 1;
@@ -349,14 +348,34 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
int i;
loff_t new_pos;
int status = -EAGAIN;
- struct nfs_open_dir_context *ctx = desc->file->private_data;
for (i = 0; i < array->size; i++) {
if (array->array[i].cookie == *desc->dir_cookie) {
+ struct nfs_inode *nfsi = NFS_I(desc->file->f_path.dentry->d_inode);
+ struct nfs_open_dir_context *ctx = desc->file->private_data;
+
new_pos = desc->current_index + i;
- if (new_pos < desc->file->f_pos) {
+ if (ctx->attr_gencount != nfsi->attr_gencount
+ || (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))) {
+ ctx->duped = 0;
+ ctx->attr_gencount = nfsi->attr_gencount;
+ } else if (new_pos < desc->file->f_pos) {
+ if (ctx->duped > 0
+ && ctx->dup_cookie == *desc->dir_cookie) {
+ if (printk_ratelimit()) {
+ pr_notice("NFS: directory %s/%s contains a readdir loop. "
+ "Please contact your server vendor. "
+ "The file: %s has duplicate cookie %llu\n",
+ desc->file->f_dentry->d_parent->d_name.name,
+ desc->file->f_dentry->d_name.name,
+ array->array[i].string.name,
+ *desc->dir_cookie);
+ }
+ status = -ELOOP;
+ goto out;
+ }
ctx->dup_cookie = *desc->dir_cookie;
- ctx->duped = 1;
+ ctx->duped = -1;
}
desc->file->f_pos = new_pos;
desc->cache_entry_index = i;
@@ -368,6 +387,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
if (*desc->dir_cookie == array->last_cookie)
desc->eof = 1;
}
+out:
return status;
}
@@ -740,19 +760,6 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
struct nfs_cache_array *array = NULL;
struct nfs_open_dir_context *ctx = file->private_data;
- if (ctx->duped != 0 && ctx->dup_cookie == *desc->dir_cookie) {
- if (printk_ratelimit()) {
- pr_notice("NFS: directory %s/%s contains a readdir loop. "
- "Please contact your server vendor. "
- "Offending cookie: %llu\n",
- file->f_dentry->d_parent->d_name.name,
- file->f_dentry->d_name.name,
- *desc->dir_cookie);
- }
- res = -ELOOP;
- goto out;
- }
-
array = nfs_readdir_get_array(desc->page);
if (IS_ERR(array)) {
res = PTR_ERR(array);
@@ -774,6 +781,8 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
*desc->dir_cookie = array->array[i+1].cookie;
else
*desc->dir_cookie = array->last_cookie;
+ if (ctx->duped != 0)
+ ctx->duped = 1;
}
if (array->eof_index >= 0)
desc->eof = 1;
@@ -805,6 +814,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
struct page *page = NULL;
int status;
struct inode *inode = desc->file->f_path.dentry->d_inode;
+ struct nfs_open_dir_context *ctx = desc->file->private_data;
dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n",
(unsigned long long)*desc->dir_cookie);
@@ -818,6 +828,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
desc->page_index = 0;
desc->last_cookie = *desc->dir_cookie;
desc->page = page;
+ ctx->duped = 0;
status = nfs_readdir_xdr_to_array(desc, page, inode);
if (status < 0)
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index e49e73107e6..7ef23979896 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -415,7 +415,7 @@ fail:
}
int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
- mode_t mode)
+ umode_t mode)
{
struct posix_acl *dfacl, *acl;
int error = 0;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 38053d823eb..85f1690ca08 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -316,7 +316,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags, struct nfs_open_context *ctx)
{
struct nfs3_createdata *data;
- mode_t mode = sattr->ia_mode;
+ umode_t mode = sattr->ia_mode;
int status = -ENOMEM;
dprintk("NFS call create %s\n", dentry->d_name.name);
@@ -562,7 +562,7 @@ static int
nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct nfs3_createdata *data;
- int mode = sattr->ia_mode;
+ umode_t mode = sattr->ia_mode;
int status = -ENOMEM;
dprintk("NFS call mkdir %s\n", dentry->d_name.name);
@@ -681,7 +681,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
dev_t rdev)
{
struct nfs3_createdata *data;
- mode_t mode = sattr->ia_mode;
+ umode_t mode = sattr->ia_mode;
int status = -ENOMEM;
dprintk("NFS call mknod %s %u:%u\n", dentry->d_name.name,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 1909ee8be35..1ec1a85fa71 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -318,7 +318,7 @@ extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
extern const u32 nfs4_fattr_bitmap[2];
extern const u32 nfs4_statfs_bitmap[2];
extern const u32 nfs4_pathconf_bitmap[2];
-extern const u32 nfs4_fsinfo_bitmap[2];
+extern const u32 nfs4_fsinfo_bitmap[3];
extern const u32 nfs4_fs_locations_bitmap[2];
/* nfs4renewd.c */
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index be93a622872..e8915d4840a 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -170,7 +170,7 @@ filelayout_set_layoutcommit(struct nfs_write_data *wdata)
pnfs_set_layoutcommit(wdata);
dprintk("%s inode %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino,
- (unsigned long) wdata->lseg->pls_end_pos);
+ (unsigned long) NFS_I(wdata->inode)->layout->plh_lwb);
}
/*
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 079614deca3..8c77039e7a8 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -140,12 +140,13 @@ const u32 nfs4_pathconf_bitmap[2] = {
0
};
-const u32 nfs4_fsinfo_bitmap[2] = { FATTR4_WORD0_MAXFILESIZE
+const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
| FATTR4_WORD0_MAXREAD
| FATTR4_WORD0_MAXWRITE
| FATTR4_WORD0_LEASE_TIME,
FATTR4_WORD1_TIME_DELTA
- | FATTR4_WORD1_FS_LAYOUT_TYPES
+ | FATTR4_WORD1_FS_LAYOUT_TYPES,
+ FATTR4_WORD2_LAYOUT_BLKSIZE
};
const u32 nfs4_fs_locations_bitmap[2] = {
@@ -5834,6 +5835,54 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
return status;
}
+/*
+ * Retrieve the list of Data Server devices from the MDS.
+ */
+static int _nfs4_getdevicelist(struct nfs_server *server,
+ const struct nfs_fh *fh,
+ struct pnfs_devicelist *devlist)
+{
+ struct nfs4_getdevicelist_args args = {
+ .fh = fh,
+ .layoutclass = server->pnfs_curr_ld->id,
+ };
+ struct nfs4_getdevicelist_res res = {
+ .devlist = devlist,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ dprintk("--> %s\n", __func__);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
+ &res.seq_res, 0);
+ dprintk("<-- %s status=%d\n", __func__, status);
+ return status;
+}
+
+int nfs4_proc_getdevicelist(struct nfs_server *server,
+ const struct nfs_fh *fh,
+ struct pnfs_devicelist *devlist)
+{
+ struct nfs4_exception exception = { };
+ int err;
+
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_getdevicelist(server, fh, devlist),
+ &exception);
+ } while (exception.retry);
+
+ dprintk("%s: err=%d, num_devs=%u\n", __func__,
+ err, devlist->num_devs);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
+
static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
@@ -5912,9 +5961,16 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
static void nfs4_layoutcommit_release(void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
+ struct pnfs_layout_segment *lseg, *tmp;
+ pnfs_cleanup_layoutcommit(data);
/* Matched by references in pnfs_set_layoutcommit */
- put_lseg(data->lseg);
+ list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
+ list_del_init(&lseg->pls_lc_list);
+ if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
+ &lseg->pls_flags))
+ put_lseg(lseg);
+ }
put_rpccred(data->cred);
kfree(data);
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c191a9baa42..1dce12f41a4 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -113,7 +113,11 @@ static int nfs4_stat_to_errno(int);
#define encode_restorefh_maxsz (op_encode_hdr_maxsz)
#define decode_restorefh_maxsz (op_decode_hdr_maxsz)
#define encode_fsinfo_maxsz (encode_getattr_maxsz)
-#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + 15)
+/* The 5 accounts for the PNFS attributes, and assumes that at most three
+ * layout types will be returned.
+ */
+#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + \
+ nfs4_fattr_bitmap_maxsz + 4 + 8 + 5)
#define encode_renew_maxsz (op_encode_hdr_maxsz + 3)
#define decode_renew_maxsz (op_decode_hdr_maxsz)
#define encode_setclientid_maxsz \
@@ -314,6 +318,17 @@ static int nfs4_stat_to_errno(int);
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
#define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4)
#define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4)
+#define encode_getdevicelist_maxsz (op_encode_hdr_maxsz + 4 + \
+ encode_verifier_maxsz)
+#define decode_getdevicelist_maxsz (op_decode_hdr_maxsz + \
+ 2 /* nfs_cookie4 gdlr_cookie */ + \
+ decode_verifier_maxsz \
+ /* verifier4 gdlr_verifier */ + \
+ 1 /* gdlr_deviceid_list count */ + \
+ XDR_QUADLEN(NFS4_PNFS_GETDEVLIST_MAXNUM * \
+ NFS4_DEVICEID4_SIZE) \
+ /* gdlr_deviceid_list */ + \
+ 1 /* bool gdlr_eof */)
#define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + 4 + \
XDR_QUADLEN(NFS4_DEVICEID4_SIZE))
#define decode_getdeviceinfo_maxsz (op_decode_hdr_maxsz + \
@@ -748,6 +763,14 @@ static int nfs4_stat_to_errno(int);
#define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_reclaim_complete_maxsz)
+#define NFS4_enc_getdevicelist_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_getdevicelist_maxsz)
+#define NFS4_dec_getdevicelist_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_getdevicelist_maxsz)
#define NFS4_enc_getdeviceinfo_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz +\
encode_getdeviceinfo_maxsz)
@@ -1104,6 +1127,35 @@ static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm
hdr->replen += decode_getattr_maxsz;
}
+static void
+encode_getattr_three(struct xdr_stream *xdr,
+ uint32_t bm0, uint32_t bm1, uint32_t bm2,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_GETATTR);
+ if (bm2) {
+ p = reserve_space(xdr, 16);
+ *p++ = cpu_to_be32(3);
+ *p++ = cpu_to_be32(bm0);
+ *p++ = cpu_to_be32(bm1);
+ *p = cpu_to_be32(bm2);
+ } else if (bm1) {
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(2);
+ *p++ = cpu_to_be32(bm0);
+ *p = cpu_to_be32(bm1);
+ } else {
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(1);
+ *p = cpu_to_be32(bm0);
+ }
+ hdr->nops++;
+ hdr->replen += decode_getattr_maxsz;
+}
+
static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
encode_getattr_two(xdr, bitmask[0] & nfs4_fattr_bitmap[0],
@@ -1112,8 +1164,11 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c
static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
- encode_getattr_two(xdr, bitmask[0] & nfs4_fsinfo_bitmap[0],
- bitmask[1] & nfs4_fsinfo_bitmap[1], hdr);
+ encode_getattr_three(xdr,
+ bitmask[0] & nfs4_fsinfo_bitmap[0],
+ bitmask[1] & nfs4_fsinfo_bitmap[1],
+ bitmask[2] & nfs4_fsinfo_bitmap[2],
+ hdr);
}
static void encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
@@ -1855,6 +1910,26 @@ static void encode_sequence(struct xdr_stream *xdr,
#ifdef CONFIG_NFS_V4_1
static void
+encode_getdevicelist(struct xdr_stream *xdr,
+ const struct nfs4_getdevicelist_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ nfs4_verifier dummy = {
+ .data = "dummmmmy",
+ };
+
+ p = reserve_space(xdr, 20);
+ *p++ = cpu_to_be32(OP_GETDEVICELIST);
+ *p++ = cpu_to_be32(args->layoutclass);
+ *p++ = cpu_to_be32(NFS4_PNFS_GETDEVLIST_MAXNUM);
+ xdr_encode_hyper(p, 0ULL); /* cookie */
+ encode_nfs4_verifier(xdr, &dummy);
+ hdr->nops++;
+ hdr->replen += decode_getdevicelist_maxsz;
+}
+
+static void
encode_getdeviceinfo(struct xdr_stream *xdr,
const struct nfs4_getdeviceinfo_args *args,
struct compound_hdr *hdr)
@@ -1916,7 +1991,7 @@ encode_layoutcommit(struct xdr_stream *xdr,
*p++ = cpu_to_be32(OP_LAYOUTCOMMIT);
/* Only whole file layouts */
p = xdr_encode_hyper(p, 0); /* offset */
- p = xdr_encode_hyper(p, NFS4_MAX_UINT64); /* length */
+ p = xdr_encode_hyper(p, args->lastbytewritten + 1); /* length */
*p++ = cpu_to_be32(0); /* reclaim */
p = xdr_encode_opaque_fixed(p, args->stateid.data, NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(1); /* newoffset = TRUE */
@@ -2604,7 +2679,7 @@ static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
struct compound_hdr hdr = {
.nops = 0,
};
- const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
+ const u32 lease_bitmap[3] = { FATTR4_WORD0_LEASE_TIME };
encode_compound_hdr(xdr, req, &hdr);
encode_setclientid_confirm(xdr, arg, &hdr);
@@ -2748,7 +2823,7 @@ static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
};
- const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
+ const u32 lease_bitmap[3] = { FATTR4_WORD0_LEASE_TIME };
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->la_seq_args, &hdr);
@@ -2775,6 +2850,24 @@ static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
}
/*
+ * Encode GETDEVICELIST request
+ */
+static void nfs4_xdr_enc_getdevicelist(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_getdevicelist_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_getdevicelist(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* Encode GETDEVICEINFO request
*/
static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
@@ -3011,14 +3104,17 @@ static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
goto out_overflow;
bmlen = be32_to_cpup(p);
- bitmap[0] = bitmap[1] = 0;
+ bitmap[0] = bitmap[1] = bitmap[2] = 0;
p = xdr_inline_decode(xdr, (bmlen << 2));
if (unlikely(!p))
goto out_overflow;
if (bmlen > 0) {
bitmap[0] = be32_to_cpup(p++);
- if (bmlen > 1)
- bitmap[1] = be32_to_cpup(p);
+ if (bmlen > 1) {
+ bitmap[1] = be32_to_cpup(p++);
+ if (bmlen > 2)
+ bitmap[2] = be32_to_cpup(p);
+ }
}
return 0;
out_overflow:
@@ -3050,8 +3146,9 @@ static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint3
return ret;
bitmap[0] &= ~FATTR4_WORD0_SUPPORTED_ATTRS;
} else
- bitmask[0] = bitmask[1] = 0;
- dprintk("%s: bitmask=%08x:%08x\n", __func__, bitmask[0], bitmask[1]);
+ bitmask[0] = bitmask[1] = bitmask[2] = 0;
+ dprintk("%s: bitmask=%08x:%08x:%08x\n", __func__,
+ bitmask[0], bitmask[1], bitmask[2]);
return 0;
}
@@ -4105,7 +4202,7 @@ out_overflow:
static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res)
{
__be32 *savep;
- uint32_t attrlen, bitmap[2] = {0};
+ uint32_t attrlen, bitmap[3] = {0};
int status;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
@@ -4131,7 +4228,7 @@ xdr_error:
static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat)
{
__be32 *savep;
- uint32_t attrlen, bitmap[2] = {0};
+ uint32_t attrlen, bitmap[3] = {0};
int status;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
@@ -4163,7 +4260,7 @@ xdr_error:
static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf)
{
__be32 *savep;
- uint32_t attrlen, bitmap[2] = {0};
+ uint32_t attrlen, bitmap[3] = {0};
int status;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
@@ -4303,7 +4400,7 @@ static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fat
{
__be32 *savep;
uint32_t attrlen,
- bitmap[2] = {0};
+ bitmap[3] = {0};
int status;
status = decode_op_hdr(xdr, OP_GETATTR);
@@ -4389,10 +4486,32 @@ static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap,
return status;
}
+/*
+ * The preferred block size for layout-directed I/O
+ */
+static int decode_attr_layout_blksize(struct xdr_stream *xdr, uint32_t *bitmap,
+ uint32_t *res)
+{
+ __be32 *p;
+
+ dprintk("%s: bitmap is %x\n", __func__, bitmap[2]);
+ *res = 0;
+ if (bitmap[2] & FATTR4_WORD2_LAYOUT_BLKSIZE) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p)) {
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+ }
+ *res = be32_to_cpup(p);
+ bitmap[2] &= ~FATTR4_WORD2_LAYOUT_BLKSIZE;
+ }
+ return 0;
+}
+
static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
{
__be32 *savep;
- uint32_t attrlen, bitmap[2];
+ uint32_t attrlen, bitmap[3];
int status;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
@@ -4420,6 +4539,9 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
status = decode_attr_pnfstype(xdr, bitmap, &fsinfo->layouttype);
if (status != 0)
goto xdr_error;
+ status = decode_attr_layout_blksize(xdr, bitmap, &fsinfo->blksize);
+ if (status)
+ goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
@@ -4839,7 +4961,7 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
{
__be32 *savep;
uint32_t attrlen,
- bitmap[2] = {0};
+ bitmap[3] = {0};
struct kvec *iov = req->rq_rcv_buf.head;
int status;
@@ -5268,6 +5390,53 @@ out_overflow:
}
#if defined(CONFIG_NFS_V4_1)
+/*
+ * TODO: Need to handle the case when EOF != true.
+ */
+static int decode_getdevicelist(struct xdr_stream *xdr,
+ struct pnfs_devicelist *res)
+{
+ __be32 *p;
+ int status, i;
+ struct nfs_writeverf verftemp;
+
+ status = decode_op_hdr(xdr, OP_GETDEVICELIST);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 8 + 8 + 4);
+ if (unlikely(!p))
+ goto out_overflow;
+
+ /* TODO: Skip cookie for now */
+ p += 2;
+
+ /* Read verifier */
+ p = xdr_decode_opaque_fixed(p, verftemp.verifier, 8);
+
+ res->num_devs = be32_to_cpup(p);
+
+ dprintk("%s: num_dev %d\n", __func__, res->num_devs);
+
+ if (res->num_devs > NFS4_PNFS_GETDEVLIST_MAXNUM) {
+ printk(KERN_ERR "%s: too many devices returned: %u\n",
+ __func__, res->num_devs);
+ return -EIO;
+ }
+
+ p = xdr_inline_decode(xdr,
+ res->num_devs * NFS4_DEVICEID4_SIZE + 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ for (i = 0; i < res->num_devs; i++)
+ p = xdr_decode_opaque_fixed(p, res->dev_id[i].data,
+ NFS4_DEVICEID4_SIZE);
+ res->eof = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
static int decode_getdeviceinfo(struct xdr_stream *xdr,
struct pnfs_device *pdev)
@@ -5430,6 +5599,7 @@ static int decode_layoutcommit(struct xdr_stream *xdr,
int status;
status = decode_op_hdr(xdr, OP_LAYOUTCOMMIT);
+ res->status = status;
if (status)
return status;
@@ -6542,6 +6712,32 @@ static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
}
/*
+ * Decode GETDEVICELIST response
+ */
+static int nfs4_xdr_dec_getdevicelist(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_getdevicelist_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ dprintk("decoding getdevicelist!\n");
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status != 0)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status != 0)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status != 0)
+ goto out;
+ status = decode_getdevicelist(xdr, res->devlist);
+out:
+ return status;
+}
+
+/*
* Decode GETDEVINFO response
*/
static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
@@ -6722,7 +6918,7 @@ out:
int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
int plus)
{
- uint32_t bitmap[2] = {0};
+ uint32_t bitmap[3] = {0};
uint32_t len;
__be32 *p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
@@ -6908,6 +7104,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
+ PROC(GETDEVICELIST, enc_getdevicelist, dec_getdevicelist),
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 38e5508555c..e550e8836c3 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -76,8 +76,11 @@ find_pnfs_driver(u32 id)
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
- if (nfss->pnfs_curr_ld)
+ if (nfss->pnfs_curr_ld) {
+ if (nfss->pnfs_curr_ld->clear_layoutdriver)
+ nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
module_put(nfss->pnfs_curr_ld->owner);
+ }
nfss->pnfs_curr_ld = NULL;
}
@@ -88,7 +91,8 @@ unset_pnfs_layoutdriver(struct nfs_server *nfss)
* @id layout type. Zero (illegal layout type) indicates pNFS not in use.
*/
void
-set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
+set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
+ u32 id)
{
struct pnfs_layoutdriver_type *ld_type = NULL;
@@ -115,6 +119,13 @@ set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
goto out_no_driver;
}
server->pnfs_curr_ld = ld_type;
+ if (ld_type->set_layoutdriver
+ && ld_type->set_layoutdriver(server, mntfh)) {
+ printk(KERN_ERR "%s: Error initializing pNFS layout driver %u.\n",
+ __func__, id);
+ module_put(ld_type->owner);
+ goto out_no_driver;
+ }
dprintk("%s: pNFS module for %u set\n", __func__, id);
return;
@@ -190,6 +201,7 @@ static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
+ put_rpccred(lo->plh_lc_cred);
return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
}
@@ -224,6 +236,7 @@ static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
INIT_LIST_HEAD(&lseg->pls_list);
+ INIT_LIST_HEAD(&lseg->pls_lc_list);
atomic_set(&lseg->pls_refcount, 1);
smp_mb();
set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
@@ -816,7 +829,9 @@ out:
}
static struct pnfs_layout_hdr *
-alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
+alloc_init_layout_hdr(struct inode *ino,
+ struct nfs_open_context *ctx,
+ gfp_t gfp_flags)
{
struct pnfs_layout_hdr *lo;
@@ -828,11 +843,14 @@ alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
INIT_LIST_HEAD(&lo->plh_segs);
INIT_LIST_HEAD(&lo->plh_bulk_recall);
lo->plh_inode = ino;
+ lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
return lo;
}
static struct pnfs_layout_hdr *
-pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
+pnfs_find_alloc_layout(struct inode *ino,
+ struct nfs_open_context *ctx,
+ gfp_t gfp_flags)
{
struct nfs_inode *nfsi = NFS_I(ino);
struct pnfs_layout_hdr *new = NULL;
@@ -847,7 +865,7 @@ pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
return nfsi->layout;
}
spin_unlock(&ino->i_lock);
- new = alloc_init_layout_hdr(ino, gfp_flags);
+ new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
spin_lock(&ino->i_lock);
if (likely(nfsi->layout == NULL)) /* Won the race? */
@@ -940,7 +958,7 @@ pnfs_update_layout(struct inode *ino,
if (!pnfs_enabled_sb(NFS_SERVER(ino)))
return NULL;
spin_lock(&ino->i_lock);
- lo = pnfs_find_alloc_layout(ino, gfp_flags);
+ lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
if (lo == NULL) {
dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
goto out_unlock;
@@ -1350,16 +1368,17 @@ pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
/*
- * Currently there is only one (whole file) write lseg.
+ * There can be multiple RW segments.
*/
-static struct pnfs_layout_segment *pnfs_list_write_lseg(struct inode *inode)
+static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
- struct pnfs_layout_segment *lseg, *rv = NULL;
+ struct pnfs_layout_segment *lseg;
- list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
- if (lseg->pls_range.iomode == IOMODE_RW)
- rv = lseg;
- return rv;
+ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
+ if (lseg->pls_range.iomode == IOMODE_RW &&
+ test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+ list_add(&lseg->pls_lc_list, listp);
+ }
}
void
@@ -1371,17 +1390,19 @@ pnfs_set_layoutcommit(struct nfs_write_data *wdata)
spin_lock(&nfsi->vfs_inode.i_lock);
if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
- /* references matched in nfs4_layoutcommit_release */
- get_lseg(wdata->lseg);
- wdata->lseg->pls_lc_cred =
- get_rpccred(wdata->args.context->state->owner->so_cred);
mark_as_dirty = true;
dprintk("%s: Set layoutcommit for inode %lu ",
__func__, wdata->inode->i_ino);
}
- if (end_pos > wdata->lseg->pls_end_pos)
- wdata->lseg->pls_end_pos = end_pos;
+ if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
+ /* references matched in nfs4_layoutcommit_release */
+ get_lseg(wdata->lseg);
+ }
+ if (end_pos > nfsi->layout->plh_lwb)
+ nfsi->layout->plh_lwb = end_pos;
spin_unlock(&nfsi->vfs_inode.i_lock);
+ dprintk("%s: lseg %p end_pos %llu\n",
+ __func__, wdata->lseg, nfsi->layout->plh_lwb);
/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
* will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
@@ -1390,6 +1411,14 @@ pnfs_set_layoutcommit(struct nfs_write_data *wdata)
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
+void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
+{
+ struct nfs_server *nfss = NFS_SERVER(data->args.inode);
+
+ if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
+ nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
+}
+
/*
* For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
* NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
@@ -1403,8 +1432,6 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
struct nfs4_layoutcommit_data *data;
struct nfs_inode *nfsi = NFS_I(inode);
- struct pnfs_layout_segment *lseg;
- struct rpc_cred *cred;
loff_t end_pos;
int status = 0;
@@ -1421,30 +1448,25 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
goto out;
}
+ INIT_LIST_HEAD(&data->lseg_list);
spin_lock(&inode->i_lock);
if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
spin_unlock(&inode->i_lock);
kfree(data);
goto out;
}
- /*
- * Currently only one (whole file) write lseg which is referenced
- * in pnfs_set_layoutcommit and will be found.
- */
- lseg = pnfs_list_write_lseg(inode);
- end_pos = lseg->pls_end_pos;
- cred = lseg->pls_lc_cred;
- lseg->pls_end_pos = 0;
- lseg->pls_lc_cred = NULL;
+ pnfs_list_write_lseg(inode, &data->lseg_list);
+
+ end_pos = nfsi->layout->plh_lwb;
+ nfsi->layout->plh_lwb = 0;
memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
sizeof(nfsi->layout->plh_stateid.data));
spin_unlock(&inode->i_lock);
data->args.inode = inode;
- data->lseg = lseg;
- data->cred = cred;
+ data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
nfs_fattr_init(&data->fattr);
data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
data->res.fattr = &data->fattr;
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 078670dfbe0..01cbfd54f3c 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -36,16 +36,16 @@
enum {
NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */
NFS_LSEG_ROC, /* roc bit received from server */
+ NFS_LSEG_LAYOUTCOMMIT, /* layoutcommit bit set for layoutcommit */
};
struct pnfs_layout_segment {
struct list_head pls_list;
+ struct list_head pls_lc_list;
struct pnfs_layout_range pls_range;
atomic_t pls_refcount;
unsigned long pls_flags;
struct pnfs_layout_hdr *pls_layout;
- struct rpc_cred *pls_lc_cred; /* LAYOUTCOMMIT credential */
- loff_t pls_end_pos; /* LAYOUTCOMMIT write end */
};
enum pnfs_try_status {
@@ -80,6 +80,9 @@ struct pnfs_layoutdriver_type {
struct module *owner;
unsigned flags;
+ int (*set_layoutdriver) (struct nfs_server *, const struct nfs_fh *);
+ int (*clear_layoutdriver) (struct nfs_server *);
+
struct pnfs_layout_hdr * (*alloc_layout_hdr) (struct inode *inode, gfp_t gfp_flags);
void (*free_layout_hdr) (struct pnfs_layout_hdr *);
@@ -110,6 +113,8 @@ struct pnfs_layoutdriver_type {
struct xdr_stream *xdr,
const struct nfs4_layoutreturn_args *args);
+ void (*cleanup_layoutcommit) (struct nfs4_layoutcommit_data *data);
+
void (*encode_layoutcommit) (struct pnfs_layout_hdr *layoutid,
struct xdr_stream *xdr,
const struct nfs4_layoutcommit_args *args);
@@ -125,6 +130,8 @@ struct pnfs_layout_hdr {
unsigned long plh_block_lgets; /* block LAYOUTGET if >0 */
u32 plh_barrier; /* ignore lower seqids */
unsigned long plh_flags;
+ loff_t plh_lwb; /* last write byte for layoutcommit */
+ struct rpc_cred *plh_lc_cred; /* layoutcommit cred */
struct inode *plh_inode;
};
@@ -137,10 +144,21 @@ struct pnfs_device {
unsigned int pglen;
};
+#define NFS4_PNFS_GETDEVLIST_MAXNUM 16
+
+struct pnfs_devicelist {
+ unsigned int eof;
+ unsigned int num_devs;
+ struct nfs4_deviceid dev_id[NFS4_PNFS_GETDEVLIST_MAXNUM];
+};
+
extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *);
extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *);
/* nfs4proc.c */
+extern int nfs4_proc_getdevicelist(struct nfs_server *server,
+ const struct nfs_fh *fh,
+ struct pnfs_devicelist *devlist);
extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev);
extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
@@ -153,7 +171,7 @@ void put_lseg(struct pnfs_layout_segment *lseg);
bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *, int);
-void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
+void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
void unset_pnfs_layoutdriver(struct nfs_server *);
void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *);
int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
@@ -179,6 +197,7 @@ void pnfs_roc_release(struct inode *ino);
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
+void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
int _pnfs_return_layout(struct inode *);
int pnfs_ld_write_done(struct nfs_write_data *);
@@ -360,7 +379,8 @@ pnfs_roc_drain(struct inode *ino, u32 *barrier)
return false;
}
-static inline void set_pnfs_layoutdriver(struct nfs_server *s, u32 id)
+static inline void set_pnfs_layoutdriver(struct nfs_server *s,
+ const struct nfs_fh *mntfh, u32 id)
{
}
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 783c58d9daf..a7219075b4d 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -247,7 +247,7 @@ static int ocfs2_set_acl(handle_t *handle,
case ACL_TYPE_ACCESS:
name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
ret = posix_acl_equiv_mode(acl, &mode);
if (ret < 0)
return ret;
@@ -351,7 +351,7 @@ int ocfs2_init_acl(handle_t *handle,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl = NULL;
int ret = 0, ret2;
- mode_t mode;
+ umode_t mode;
if (!S_ISLNK(inode->i_mode)) {
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index d43729a760e..10027b42b7e 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -149,10 +149,10 @@ posix_acl_valid(const struct posix_acl *acl)
* file mode permission bits, or else 1. Returns -E... on error.
*/
int
-posix_acl_equiv_mode(const struct posix_acl *acl, mode_t *mode_p)
+posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
{
const struct posix_acl_entry *pa, *pe;
- mode_t mode = 0;
+ umode_t mode = 0;
int not_equiv = 0;
FOREACH_ACL_ENTRY(pa, acl, pe) {
@@ -188,7 +188,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, mode_t *mode_p)
* Create an ACL representing the file mode permission bits of an inode.
*/
struct posix_acl *
-posix_acl_from_mode(mode_t mode, gfp_t flags)
+posix_acl_from_mode(umode_t mode, gfp_t flags)
{
struct posix_acl *acl = posix_acl_alloc(3, flags);
if (!acl)
@@ -279,11 +279,11 @@ check_perm:
* system calls. All permissions that are not granted by the acl are removed.
* The permissions in the acl are changed to reflect the mode_p parameter.
*/
-static int posix_acl_create_masq(struct posix_acl *acl, mode_t *mode_p)
+static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
{
struct posix_acl_entry *pa, *pe;
struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL;
- mode_t mode = *mode_p;
+ umode_t mode = *mode_p;
int not_equiv = 0;
/* assert(atomic_read(acl->a_refcount) == 1); */
@@ -336,7 +336,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, mode_t *mode_p)
/*
* Modify the ACL for the chmod syscall.
*/
-static int posix_acl_chmod_masq(struct posix_acl *acl, mode_t mode)
+static int posix_acl_chmod_masq(struct posix_acl *acl, umode_t mode)
{
struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL;
struct posix_acl_entry *pa, *pe;
@@ -382,7 +382,7 @@ static int posix_acl_chmod_masq(struct posix_acl *acl, mode_t mode)
}
int
-posix_acl_create(struct posix_acl **acl, gfp_t gfp, mode_t *mode_p)
+posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
{
struct posix_acl *clone = posix_acl_clone(*acl, gfp);
int err = -ENOMEM;
@@ -400,7 +400,7 @@ posix_acl_create(struct posix_acl **acl, gfp_t gfp, mode_t *mode_p)
EXPORT_SYMBOL(posix_acl_create);
int
-posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, mode_t mode)
+posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode)
{
struct posix_acl *clone = posix_acl_clone(*acl, gfp);
int err = -ENOMEM;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 08e3eccf9a1..5eb02069e1b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1118,7 +1118,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
* Warn that /proc/pid/oom_adj is deprecated, see
* Documentation/feature-removal-schedule.txt.
*/
- WARN_ONCE(1, "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
+ printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
current->comm, task_pid_nr(current), task_pid_nr(task),
task_pid_nr(task));
task->signal->oom_adj = oom_adjust;
@@ -1919,6 +1919,14 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
spin_lock(&files->file_lock);
file = fcheck_files(files, fd);
if (file) {
+ unsigned int f_flags;
+ struct fdtable *fdt;
+
+ fdt = files_fdtable(files);
+ f_flags = file->f_flags & ~O_CLOEXEC;
+ if (FD_ISSET(fd, fdt->close_on_exec))
+ f_flags |= O_CLOEXEC;
+
if (path) {
*path = file->f_path;
path_get(&file->f_path);
@@ -1928,7 +1936,7 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
"pos:\t%lli\n"
"flags:\t0%o\n",
(long long) file->f_pos,
- file->f_flags);
+ f_flags);
spin_unlock(&files->file_lock);
put_files_struct(files);
return 0;
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 977ed272384..893b961dcfd 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -39,8 +39,9 @@
#define PSTORE_NAMELEN 64
struct pstore_private {
+ struct pstore_info *psi;
+ enum pstore_type_id type;
u64 id;
- int (*erase)(u64);
ssize_t size;
char data[];
};
@@ -73,7 +74,7 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
{
struct pstore_private *p = dentry->d_inode->i_private;
- p->erase(p->id);
+ p->psi->erase(p->type, p->id, p->psi);
return simple_unlink(dir, dentry);
}
@@ -175,8 +176,8 @@ int pstore_is_mounted(void)
* Set the mtime & ctime to the date that this record was originally stored.
*/
int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id,
- char *data, size_t size,
- struct timespec time, int (*erase)(u64))
+ char *data, size_t size, struct timespec time,
+ struct pstore_info *psi)
{
struct dentry *root = pstore_sb->s_root;
struct dentry *dentry;
@@ -192,8 +193,9 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id,
private = kmalloc(sizeof *private + size, GFP_KERNEL);
if (!private)
goto fail_alloc;
+ private->type = type;
private->id = id;
- private->erase = erase;
+ private->psi = psi;
switch (type) {
case PSTORE_TYPE_DMESG:
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 8c9f23eb164..611c1b3c46f 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -2,5 +2,5 @@ extern void pstore_set_kmsg_bytes(int);
extern void pstore_get_records(void);
extern int pstore_mkfile(enum pstore_type_id, char *psname, u64 id,
char *data, size_t size,
- struct timespec time, int (*erase)(u64));
+ struct timespec time, struct pstore_info *psi);
extern int pstore_is_mounted(void);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index f2c3ff20ea6..c5300ec3169 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -37,6 +37,8 @@
static DEFINE_SPINLOCK(pstore_lock);
static struct pstore_info *psinfo;
+static char *backend;
+
/* How much of the console log to snapshot */
static unsigned long kmsg_bytes = 10240;
@@ -67,7 +69,8 @@ static void pstore_dump(struct kmsg_dumper *dumper,
unsigned long size, total = 0;
char *dst, *why;
u64 id;
- int hsize, part = 1;
+ int hsize;
+ unsigned int part = 1;
if (reason < ARRAY_SIZE(reason_str))
why = reason_str[reason];
@@ -78,7 +81,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
oopscount++;
while (total < kmsg_bytes) {
dst = psinfo->buf;
- hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part++);
+ hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
size = psinfo->bufsize - hsize;
dst += hsize;
@@ -94,14 +97,16 @@ static void pstore_dump(struct kmsg_dumper *dumper,
memcpy(dst, s1 + s1_start, l1_cpy);
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
- id = psinfo->write(PSTORE_TYPE_DMESG, hsize + l1_cpy + l2_cpy);
+ id = psinfo->write(PSTORE_TYPE_DMESG, part,
+ hsize + l1_cpy + l2_cpy, psinfo);
if (reason == KMSG_DUMP_OOPS && pstore_is_mounted())
pstore_mkfile(PSTORE_TYPE_DMESG, psinfo->name, id,
psinfo->buf, hsize + l1_cpy + l2_cpy,
- CURRENT_TIME, psinfo->erase);
+ CURRENT_TIME, psinfo);
l1 -= l1_cpy;
l2 -= l2_cpy;
total += l1_cpy + l2_cpy;
+ part++;
}
mutex_unlock(&psinfo->buf_mutex);
}
@@ -128,6 +133,12 @@ int pstore_register(struct pstore_info *psi)
spin_unlock(&pstore_lock);
return -EBUSY;
}
+
+ if (backend && strcmp(backend, psi->name)) {
+ spin_unlock(&pstore_lock);
+ return -EINVAL;
+ }
+
psinfo = psi;
spin_unlock(&pstore_lock);
@@ -166,9 +177,9 @@ void pstore_get_records(void)
if (rc)
goto out;
- while ((size = psi->read(&id, &type, &time)) > 0) {
+ while ((size = psi->read(&id, &type, &time, psi)) > 0) {
if (pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size,
- time, psi->erase))
+ time, psi))
failed++;
}
psi->close(psi);
@@ -196,12 +207,15 @@ int pstore_write(enum pstore_type_id type, char *buf, size_t size)
mutex_lock(&psinfo->buf_mutex);
memcpy(psinfo->buf, buf, size);
- id = psinfo->write(type, size);
+ id = psinfo->write(type, 0, size, psinfo);
if (pstore_is_mounted())
pstore_mkfile(PSTORE_TYPE_DMESG, psinfo->name, id, psinfo->buf,
- size, CURRENT_TIME, psinfo->erase);
+ size, CURRENT_TIME, psinfo);
mutex_unlock(&psinfo->buf_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(pstore_write);
+
+module_param(backend, charp, 0444);
+MODULE_PARM_DESC(backend, "Pstore backend to use");
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 7362cf4c946..6da0396e505 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -272,12 +272,10 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
case ACL_TYPE_ACCESS:
name = POSIX_ACL_XATTR_ACCESS;
if (acl) {
- mode_t mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
+ error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return error;
else {
- inode->i_mode = mode;
if (error == 0)
acl = NULL;
}
@@ -354,8 +352,6 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
return PTR_ERR(acl);
if (acl) {
- mode_t mode = inode->i_mode;
-
/* Copy the default ACL to the default ACL of a new directory */
if (S_ISDIR(inode->i_mode)) {
err = reiserfs_set_acl(th, inode, ACL_TYPE_DEFAULT,
@@ -366,12 +362,10 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
/* Now we reconcile the new ACL and the mode,
potentially modifying both */
- err = posix_acl_create(&acl, GFP_NOFS, &mode);
+ err = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
if (err < 0)
return err;
- inode->i_mode = mode;
-
/* If we need an ACL.. */
if (err > 0)
err = reiserfs_set_acl(th, inode, ACL_TYPE_ACCESS, acl);
diff --git a/fs/stack.c b/fs/stack.c
index 4a6f7f44065..b4f2ab48a61 100644
--- a/fs/stack.c
+++ b/fs/stack.c
@@ -29,10 +29,7 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
*
* We don't actually know what locking is used at the lower level;
* but if it's a filesystem that supports quotas, it will be using
- * i_lock as in inode_add_bytes(). tmpfs uses other locking, and
- * its 32-bit is (just) able to exceed 2TB i_size with the aid of
- * holes; but its i_blocks cannot carry into the upper long without
- * almost 2TB swap - let's ignore that case.
+ * i_lock as in inode_add_bytes().
*/
if (sizeof(i_blocks) > sizeof(long))
spin_lock(&src->i_lock);
diff --git a/fs/stat.c b/fs/stat.c
index 961039121cb..ba5316ffac6 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -27,12 +27,12 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
stat->rdev = inode->i_rdev;
+ stat->size = i_size_read(inode);
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
stat->ctime = inode->i_ctime;
- stat->size = i_size_read(inode);
- stat->blocks = inode->i_blocks;
stat->blksize = (1 << inode->i_blkbits);
+ stat->blocks = inode->i_blocks;
}
EXPORT_SYMBOL(generic_fillattr);
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 44ce5165680..b6c4b3795c4 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -221,7 +221,7 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
}
static int
-xfs_set_mode(struct inode *inode, mode_t mode)
+xfs_set_mode(struct inode *inode, umode_t mode)
{
int error = 0;
@@ -267,7 +267,7 @@ posix_acl_default_exists(struct inode *inode)
int
xfs_inherit_acl(struct inode *inode, struct posix_acl *acl)
{
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
int error = 0, inherit = 0;
if (S_ISDIR(inode->i_mode)) {
@@ -381,7 +381,7 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name,
goto out_release;
if (type == ACL_TYPE_ACCESS) {
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
error = posix_acl_equiv_mode(acl, &mode);
if (error <= 0) {
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 3090471b2a5..e49c36d38d7 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
extern int register_dock_notifier(struct notifier_block *nb);
extern void unregister_dock_notifier(struct notifier_block *nb);
extern int register_hotplug_dock_device(acpi_handle handle,
- struct acpi_dock_ops *ops,
+ const struct acpi_dock_ops *ops,
void *context);
extern void unregister_hotplug_dock_device(acpi_handle handle);
#else
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 2ed0a8486c1..f554a9313b4 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20110413
+#define ACPI_CA_VERSION 0x20110623
#include "actypes.h"
#include "actbl.h"
@@ -69,6 +69,7 @@ extern u32 acpi_gbl_trace_flags;
extern u32 acpi_gbl_enable_aml_debug_object;
extern u8 acpi_gbl_copy_dsdt_locally;
extern u8 acpi_gbl_truncate_io_addresses;
+extern u8 acpi_gbl_disable_auto_repair;
extern u32 acpi_current_gpe_count;
extern struct acpi_table_fadt acpi_gbl_FADT;
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index e67b523a50e..51a527d24a8 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -18,6 +18,11 @@
extern int hest_disable;
extern int erst_disable;
+#ifdef CONFIG_ACPI_APEI_GHES
+extern int ghes_disable;
+#else
+#define ghes_disable 1
+#endif
#ifdef CONFIG_ACPI_APEI
void __init acpi_hest_init(void);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index ba4928cae47..67055f18033 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -337,7 +337,7 @@ extern struct cpuidle_driver acpi_idle_driver;
/* in processor_thermal.c */
int acpi_processor_get_limit_info(struct acpi_processor *pr);
-extern struct thermal_cooling_device_ops processor_cooling_ops;
+extern const struct thermal_cooling_device_ops processor_cooling_ops;
#ifdef CONFIG_CPU_FREQ
void acpi_thermal_cpufreq_init(void);
void acpi_thermal_cpufreq_exit(void);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 33d12f87f0e..44335e57eaa 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -205,6 +205,8 @@ struct drm_display_info {
enum subpixel_order subpixel_order;
u32 color_formats;
+ u8 cea_rev;
+
char *raw_edid; /* if any */
};
@@ -802,6 +804,7 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
+extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh);
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index c4d6dbfa3ff..28c0d114cb5 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -237,7 +237,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
-#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_IOCTL_I915_OVERLAY_ATTRS, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
/* Allow drivers to submit batchbuffers directly to hardware, relying
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 1deb2a73c2d..6001b4da39d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -238,7 +238,6 @@ extern int acpi_paddr_to_node(u64 start_addr, u64 size);
extern int pnpacpi_disabled;
#define PXM_INVAL (-1)
-#define NID_INVAL (-1)
int acpi_check_resource_conflict(const struct resource *res);
@@ -280,6 +279,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPUHP_OST_SUPPORT 8
#define OSC_SB_APEI_SUPPORT 16
+extern bool osc_sb_apei_support_acked;
+
/* PCI defined _OSC bits */
/* _OSC DW1 Definition (OS Support Fields) */
#define OSC_EXT_PCI_CONFIG_SUPPORT 1
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 3111385b8ca..e6e28f37d8e 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -172,8 +172,11 @@ struct pl08x_dma_chan {
int phychan_hold;
struct tasklet_struct tasklet;
char *name;
- struct pl08x_channel_data *cd;
- dma_addr_t runtime_addr;
+ const struct pl08x_channel_data *cd;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ u32 src_cctl;
+ u32 dst_cctl;
enum dma_data_direction runtime_direction;
dma_cookie_t lc;
struct list_head pend_list;
@@ -202,7 +205,7 @@ struct pl08x_dma_chan {
* @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
*/
struct pl08x_platform_data {
- struct pl08x_channel_data *slave_channels;
+ const struct pl08x_channel_data *slave_channels;
unsigned int num_slave_channels;
struct pl08x_channel_data memcpy_channel;
int (*get_signal)(struct pl08x_dma_chan *);
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 3bac44cce14..7ad634501e4 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -146,6 +146,7 @@ extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 36719ead50e..b51629e15cf 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -122,6 +122,8 @@ struct cpuidle_driver {
};
#ifdef CONFIG_CPU_IDLE
+extern void disable_cpuidle(void);
+extern int cpuidle_idle_call(void);
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
struct cpuidle_driver *cpuidle_get_driver(void);
@@ -135,6 +137,8 @@ extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
#else
+static inline void disable_cpuidle(void) { }
+static inline int cpuidle_idle_call(void) { return -ENODEV; }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 48e82af1159..98f46efbe2d 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -265,10 +265,11 @@ static inline void put_cred(const struct cred *_cred)
/**
* current_cred - Access the current task's subjective credentials
*
- * Access the subjective credentials of the current task.
+ * Access the subjective credentials of the current task. RCU-safe,
+ * since nobody else can modify it.
*/
#define current_cred() \
- (current->cred)
+ (*(__force struct cred **)&current->cred)
/**
* __task_cred - Access a task's objective credentials
@@ -307,7 +308,7 @@ static inline void put_cred(const struct cred *_cred)
({ \
struct user_struct *__u; \
struct cred *__cred; \
- __cred = (struct cred *) current_cred(); \
+ __cred = current_cred(); \
__u = get_uid(__cred->user); \
__u; \
})
@@ -322,7 +323,7 @@ static inline void put_cred(const struct cred *_cred)
({ \
struct group_info *__groups; \
struct cred *__cred; \
- __cred = (struct cred *) current_cred(); \
+ __cred = current_cred(); \
__groups = get_group_info(__cred->group_info); \
__groups; \
})
@@ -341,7 +342,7 @@ static inline void put_cred(const struct cred *_cred)
#define current_cred_xxx(xxx) \
({ \
- current->cred->xxx; \
+ current_cred()->xxx; \
})
#define current_uid() (current_cred_xxx(uid))
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index ec78a4bbe1d..2cd9f1cf9fa 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -3,11 +3,16 @@
#define SHA_DIGEST_WORDS 5
#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
-#define SHA_WORKSPACE_WORDS 80
+#define SHA_WORKSPACE_WORDS 16
void sha_init(__u32 *buf);
void sha_transform(__u32 *digest, const char *data, __u32 *W);
+#define MD5_DIGEST_WORDS 4
+#define MD5_MESSAGE_BYTES 64
+
+void md5_transform(__u32 *hash, __u32 const *in);
+
__u32 half_md4_transform(__u32 buf[4], __u32 const in[8]);
#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d37d2a79309..62157c03caf 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -180,12 +180,12 @@ struct dentry_operations {
*/
/* d_flags entries */
-#define DCACHE_AUTOFS_PENDING 0x0001 /* autofs: "under construction" */
-#define DCACHE_NFSFS_RENAMED 0x0002
- /* this dentry has been "silly renamed" and has to be deleted on the last
- * dput() */
+#define DCACHE_OP_HASH 0x0001
+#define DCACHE_OP_COMPARE 0x0002
+#define DCACHE_OP_REVALIDATE 0x0004
+#define DCACHE_OP_DELETE 0x0008
-#define DCACHE_DISCONNECTED 0x0004
+#define DCACHE_DISCONNECTED 0x0010
/* This dentry is possibly not currently connected to the dcache tree, in
* which case its parent will either be itself, or will have this flag as
* well. nfsd will not use a dentry with this bit set, but will first
@@ -196,22 +196,18 @@ struct dentry_operations {
* dentry into place and return that dentry rather than the passed one,
* typically using d_splice_alias. */
-#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
-#define DCACHE_RCUACCESS 0x0010 /* Entry has ever been RCU-visible */
-#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020
- /* Parent inode is watched by inotify */
-
-#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080
- /* Parent inode is watched by some fsnotify listener */
+#define DCACHE_REFERENCED 0x0020 /* Recently used, don't discard. */
+#define DCACHE_RCUACCESS 0x0040 /* Entry has ever been RCU-visible */
#define DCACHE_CANT_MOUNT 0x0100
#define DCACHE_GENOCIDE 0x0200
-#define DCACHE_OP_HASH 0x1000
-#define DCACHE_OP_COMPARE 0x2000
-#define DCACHE_OP_REVALIDATE 0x4000
-#define DCACHE_OP_DELETE 0x8000
+#define DCACHE_NFSFS_RENAMED 0x1000
+ /* this dentry has been "silly renamed" and has to be deleted on the last
+ * dput() */
+#define DCACHE_COOKIE 0x2000 /* For use by dcookie subsystem */
+#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x4000
+ /* Parent inode is watched by some fsnotify listener */
#define DCACHE_MOUNTED 0x10000 /* is a mountpoint */
#define DCACHE_NEED_AUTOMOUNT 0x20000 /* handle automount on this dir */
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 4427e045405..3fa1f3d90ce 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -208,6 +208,49 @@ struct dm_target_callbacks {
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
+/*
+ * Target argument parsing.
+ */
+struct dm_arg_set {
+ unsigned argc;
+ char **argv;
+};
+
+/*
+ * The minimum and maximum value of a numeric argument, together with
+ * the error message to use if the number is found to be outside that range.
+ */
+struct dm_arg {
+ unsigned min;
+ unsigned max;
+ char *error;
+};
+
+/*
+ * Validate the next argument, either returning it as *value or, if invalid,
+ * returning -EINVAL and setting *error.
+ */
+int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error);
+
+/*
+ * Process the next argument as the start of a group containing between
+ * arg->min and arg->max further arguments. Either return the size as
+ * *num_args or, if invalid, return -EINVAL and set *error.
+ */
+int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *num_args, char **error);
+
+/*
+ * Return the current argument and shift to the next.
+ */
+const char *dm_shift_arg(struct dm_arg_set *as);
+
+/*
+ * Move through num_args arguments.
+ */
+void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
+
/*-----------------------------------------------------------------
* Functions for creating and manipulating mapped devices.
* Drop the reference with dm_put when you finish with the object.
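As a usage sketch of the new argument-parsing helpers (the feature name, error strings and
function below are illustrative, not part of this patch), a target constructor might consume
an optional feature list like this:

#include <linux/device-mapper.h>
#include <linux/string.h>
#include <linux/errno.h>

static int example_parse_features(struct dm_arg_set *as, struct dm_target *ti)
{
	static struct dm_arg _args[] = {
		{0, 1, "invalid number of feature arguments"},
	};
	unsigned num_features;
	int r;

	/* Read "<num_features>" and check it lies within _args[0].min..max. */
	r = dm_read_arg_group(_args, as, &num_features, &ti->error);
	if (r)
		return r;

	while (num_features--) {
		const char *arg_name = dm_shift_arg(as);

		if (!strcasecmp(arg_name, "example_feature"))
			continue;	/* a real target would set a flag here */

		ti->error = "unrecognised feature requested";
		return -EINVAL;
	}

	return 0;
}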
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 3708455ee6c..0cb8eff76bd 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 20
+#define DM_VERSION_MINOR 21
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2011-02-02)"
+#define DM_VERSION_EXTRA "-ioctl (2011-07-06)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index 298d587e349..5e54458e920 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -42,5 +42,20 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
unsigned num_dests, struct dm_io_region *dests,
unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+/*
+ * Prepare a callback and submit it via the kcopyd thread.
+ *
+ * dm_kcopyd_prepare_callback allocates a callback structure and returns it.
+ * It must not be called from interrupt context.
+ * The returned value should be passed into dm_kcopyd_do_callback.
+ *
+ * dm_kcopyd_do_callback submits the callback.
+ * It may be called from interrupt context.
+ * The callback is issued from the kcopyd thread.
+ */
+void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
+ dm_kcopyd_notify_fn fn, void *context);
+void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_KCOPYD_H */
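A sketch of the intended split between the two new calls (the structure and function names
here are illustrative):

#include <linux/dm-kcopyd.h>

struct example_op {
	struct dm_kcopyd_client *kc;
	void *completion_job;	/* handle returned by dm_kcopyd_prepare_callback */
};

/* Process context: set the callback up ahead of time. */
static void example_prepare(struct example_op *op,
			    dm_kcopyd_notify_fn fn, void *context)
{
	op->completion_job = dm_kcopyd_prepare_callback(op->kc, fn, context);
}

/* Interrupt context: hand the result to the kcopyd thread, which issues fn(). */
static void example_complete(struct example_op *op, int read_err,
			     unsigned long write_err)
{
	dm_kcopyd_do_callback(op->completion_job, read_err, write_err);
}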
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ec257269392..2362a0bc7f0 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -19,6 +19,7 @@
#include <linux/rtc.h>
#include <linux/ioport.h>
#include <linux/pfn.h>
+#include <linux/pstore.h>
#include <asm/page.h>
#include <asm/system.h>
@@ -232,6 +233,9 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
#define UV_SYSTEM_TABLE_GUID \
EFI_GUID( 0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 )
+#define LINUX_EFI_CRASH_GUID \
+ EFI_GUID( 0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0 )
+
typedef struct {
efi_guid_t guid;
unsigned long table;
@@ -458,6 +462,8 @@ struct efivars {
struct kset *kset;
struct bin_attribute *new_var, *del_var;
const struct efivar_operations *ops;
+ struct efivar_entry *walk_entry;
+ struct pstore_info efi_pstore_info;
};
int register_efivars(struct efivars *efivars,
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 3ff060ac781..c6f996f2abb 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -25,10 +25,6 @@ struct fault_attr {
unsigned long reject_end;
unsigned long count;
-
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
- struct dentry *dir;
-#endif
};
#define FAULT_ATTR_INITIALIZER { \
@@ -45,19 +41,15 @@ bool should_fail(struct fault_attr *attr, ssize_t size);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-int init_fault_attr_dentries(struct fault_attr *attr, const char *name);
-void cleanup_fault_attr_dentries(struct fault_attr *attr);
+struct dentry *fault_create_debugfs_attr(const char *name,
+ struct dentry *parent, struct fault_attr *attr);
#else /* CONFIG_FAULT_INJECTION_DEBUG_FS */
-static inline int init_fault_attr_dentries(struct fault_attr *attr,
- const char *name)
-{
- return -ENODEV;
-}
-
-static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
+static inline struct dentry *fault_create_debugfs_attr(const char *name,
+ struct dentry *parent, struct fault_attr *attr)
{
+ return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
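A sketch of how a caller might use the new helper (the attribute and directory names are
made up); a single ERR_PTR check replaces the old int return plus cleanup call:

#include <linux/fault-inject.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/module.h>

static DECLARE_FAULT_ATTR(example_fail_attr);
static struct dentry *example_fail_dir;

static int __init example_fault_init(void)
{
	example_fail_dir = fault_create_debugfs_attr("fail_example", NULL,
						     &example_fail_attr);
	if (IS_ERR(example_fail_dir))
		return PTR_ERR(example_fail_dir);
	return 0;
}

static void __exit example_fault_exit(void)
{
	debugfs_remove_recursive(example_fail_dir);
}

module_init(example_fault_init);
module_exit(example_fault_exit);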
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f23bcb77260..178cdb4f1d4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -738,22 +738,54 @@ static inline int mapping_writably_mapped(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+#define IOP_FASTPERM 0x0001
+#define IOP_LOOKUP 0x0002
+#define IOP_NOFOLLOW 0x0004
+
+/*
+ * Keep mostly read-only and often accessed (especially for
+ * the RCU path lookup and 'stat' data) fields at the beginning
+ * of the 'struct inode'
+ */
struct inode {
- /* RCU path lookup touches following: */
umode_t i_mode;
+ unsigned short i_opflags;
uid_t i_uid;
gid_t i_gid;
+ unsigned int i_flags;
+
+#ifdef CONFIG_FS_POSIX_ACL
+ struct posix_acl *i_acl;
+ struct posix_acl *i_default_acl;
+#endif
+
const struct inode_operations *i_op;
struct super_block *i_sb;
+ struct address_space *i_mapping;
- spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
- unsigned int i_flags;
- unsigned long i_state;
#ifdef CONFIG_SECURITY
void *i_security;
#endif
- struct mutex i_mutex;
+ /* Stat data, not accessed from path walking */
+ unsigned long i_ino;
+ unsigned int i_nlink;
+ dev_t i_rdev;
+ loff_t i_size;
+ struct timespec i_atime;
+ struct timespec i_mtime;
+ struct timespec i_ctime;
+ unsigned int i_blkbits;
+ blkcnt_t i_blocks;
+
+#ifdef __NEED_I_SIZE_ORDERED
+ seqcount_t i_size_seqcount;
+#endif
+
+ /* Misc */
+ unsigned long i_state;
+ spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
+ struct mutex i_mutex;
unsigned long dirtied_when; /* jiffies of first dirtying */
@@ -765,25 +797,12 @@ struct inode {
struct list_head i_dentry;
struct rcu_head i_rcu;
};
- unsigned long i_ino;
atomic_t i_count;
- unsigned int i_nlink;
- dev_t i_rdev;
- unsigned int i_blkbits;
u64 i_version;
- loff_t i_size;
-#ifdef __NEED_I_SIZE_ORDERED
- seqcount_t i_size_seqcount;
-#endif
- struct timespec i_atime;
- struct timespec i_mtime;
- struct timespec i_ctime;
- blkcnt_t i_blocks;
unsigned short i_bytes;
atomic_t i_dio_count;
const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
struct file_lock *i_flock;
- struct address_space *i_mapping;
struct address_space i_data;
#ifdef CONFIG_QUOTA
struct dquot *i_dquot[MAXQUOTAS];
@@ -806,10 +825,6 @@ struct inode {
atomic_t i_readcount; /* struct files open RO */
#endif
atomic_t i_writecount;
-#ifdef CONFIG_FS_POSIX_ACL
- struct posix_acl *i_acl;
- struct posix_acl *i_default_acl;
-#endif
void *i_private; /* fs or device private pointer */
};
@@ -2317,11 +2332,18 @@ extern int should_remove_suid(struct dentry *);
extern int file_remove_suid(struct file *);
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
-extern void remove_inode_hash(struct inode *);
static inline void insert_inode_hash(struct inode *inode)
{
__insert_inode_hash(inode, inode->i_ino);
}
+
+extern void __remove_inode_hash(struct inode *);
+static inline void remove_inode_hash(struct inode *inode)
+{
+ if (!inode_unhashed(inode))
+ __remove_inode_hash(inode);
+}
+
extern void inode_sb_list_add(struct inode *inode);
#ifdef CONFIG_BLOCK
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 5bbebda78b0..5e98eeb2af3 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -1,8 +1,26 @@
/*
- * Basic general purpose allocator for managing special purpose memory
- * not managed by the regular kmalloc/kfree interface.
- * Uses for this includes on-device special memory, uncached memory
- * etc.
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this include on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool a lock still has to
+ * be taken. So any user relying on locklessness has to ensure that
+ * sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have an NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in NMI handlers. So code that uses
+ * the allocator in an NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
@@ -15,7 +33,7 @@
* General purpose special memory pool descriptor.
*/
struct gen_pool {
- rwlock_t lock;
+ spinlock_t lock;
struct list_head chunks; /* list of chunks in this pool */
int min_alloc_order; /* minimum allocation order */
};
@@ -24,8 +42,8 @@ struct gen_pool {
* General purpose special memory pool chunk descriptor.
*/
struct gen_pool_chunk {
- spinlock_t lock;
struct list_head next_chunk; /* next chunk in pool */
+ atomic_t avail;
phys_addr_t phys_addr; /* physical starting address of memory chunk */
unsigned long start_addr; /* starting address of memory chunk */
unsigned long end_addr; /* ending address of memory chunk */
@@ -56,4 +74,8 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
extern void gen_pool_destroy(struct gen_pool *);
extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+extern void gen_pool_for_each_chunk(struct gen_pool *,
+ void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
+extern size_t gen_pool_avail(struct gen_pool *);
+extern size_t gen_pool_size(struct gen_pool *);
#endif /* __GENALLOC_H__ */
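As a usage sketch (pool geometry and names are illustrative), keeping in mind that adding
chunks still takes the lock and so must happen before any lockless user such as an NMI
handler runs:

#include <linux/genalloc.h>
#include <linux/errno.h>

static struct gen_pool *example_pool;

static int example_pool_init(unsigned long start, size_t size, int nid)
{
	unsigned long addr;

	example_pool = gen_pool_create(5, nid);		/* 32-byte granularity */
	if (!example_pool)
		return -ENOMEM;

	/* Adding chunks is not lockless; preallocate before NMI users run. */
	if (gen_pool_add(example_pool, start, size, nid)) {
		gen_pool_destroy(example_pool);
		example_pool = NULL;
		return -ENOMEM;
	}

	/* Allocation and freeing use the lockless cmpxchg-based fast path. */
	addr = gen_pool_alloc(example_pool, 64);
	if (addr)
		gen_pool_free(example_pool, addr, 64);

	return 0;
}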
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index cb4089254f0..3a76faf6a3e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -92,7 +92,7 @@ struct vm_area_struct;
*/
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_BITS_SHIFT 23 /* Room for 23 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 24 /* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/* This equals 0, but use constants in case they ever change */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 13a801f3d02..255491cf522 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -146,6 +146,10 @@ void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);
+int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
+ gfp_t gfp_mask);
+void ida_simple_remove(struct ida *ida, unsigned int id);
+
void __init idr_init_cache(void);
#endif /* __IDR_H__ */
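A sketch of the simplified interface (the ida instance and id range are illustrative);
ida_simple_get() returns a new id or a negative errno:

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

static int example_alloc_id(void)
{
	/* No explicit ida_pre_get()/retry loop is needed with the simple API. */
	return ida_simple_get(&example_ida, 0, 0x10000, GFP_KERNEL);
}

static void example_free_id(int id)
{
	ida_simple_remove(&example_ida, id);
}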
diff --git a/include/linux/input.h b/include/linux/input.h
index 068784e1797..a637e781433 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -438,6 +438,8 @@ struct input_keymap_entry {
#define KEY_WIMAX 246
#define KEY_RFKILL 247 /* Key that controls all radios */
+#define KEY_MICMUTE 248 /* Mute / unmute the microphone */
+
/* Code 255 is reserved for special needs of AT keyboard driver */
#define BTN_MISC 0x100
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index c2ebfe66177..9d57a71775b 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -162,6 +162,7 @@ extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t,
resource_size_t),
void *alignf_data);
+struct resource *lookup_resource(struct resource *root, resource_size_t start);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index d087c2e7b2a..38f307b8c33 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1329,12 +1329,6 @@ extern int jbd_blocks_per_page(struct inode *inode);
#define BUFFER_TRACE2(bh, bh2, info) do {} while (0)
#define JBUFFER_TRACE(jh, info) do {} while (0)
-/*
- * jbd2_dev_to_name is a utility function used by the jbd2 and ext4
- * tracing infrastructure to map a dev_t to a device name.
- */
-extern const char *jbd2_dev_to_name(dev_t device);
-
#endif /* __KERNEL__ */
#endif /* _LINUX_JBD2_H */
diff --git a/include/linux/llist.h b/include/linux/llist.h
new file mode 100644
index 00000000000..aa0c8b5b3cd
--- /dev/null
+++ b/include/linux/llist.h
@@ -0,0 +1,126 @@
+#ifndef LLIST_H
+#define LLIST_H
+/*
+ * Lock-less NULL terminated single linked list
+ *
+ * If there are multiple producers and multiple consumers, llist_add
+ * can be used in producers and llist_del_all can be used in
+ * consumers. They can work simultaneously without lock. But
+ * llist_del_first cannot be used here, because llist_del_first
+ * depends on list->first->next not changing while list->first is
+ * unchanged during its operation; an llist_del_first, llist_add,
+ * llist_add (or llist_del_all, llist_add, llist_add) sequence in
+ * another consumer may violate that.
+ *
+ * If there are multiple producers and one consumer, llist_add can be
+ * used in producers and llist_del_all or llist_del_first can be used
+ * in the consumer.
+ *
+ * This can be summarized as follows:
+ *
+ * | add | del_first | del_all
+ * add | - | - | -
+ * del_first | | L | L
+ * del_all | | | -
+ *
+ * Where "-" stands for no lock is needed, while "L" stands for lock
+ * is needed.
+ *
+ * The list entries deleted via llist_del_all can be traversed with
+ * traversal functions such as llist_for_each. But the list entries
+ * cannot be traversed safely before being deleted from the list.
+ * The order of deleted entries is from the newest to the oldest added
+ * one. If you want to traverse from the oldest to the newest, you
+ * must reverse the order by yourself before traversing.
+ *
+ * The basic atomic operation of this list is cmpxchg on long. On
+ * architectures that don't have an NMI-safe cmpxchg implementation,
+ * the list can NOT be used in NMI handlers. So code that uses the
+ * list in an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ */
+
+struct llist_head {
+ struct llist_node *first;
+};
+
+struct llist_node {
+ struct llist_node *next;
+};
+
+#define LLIST_HEAD_INIT(name) { NULL }
+#define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name)
+
+/**
+ * init_llist_head - initialize lock-less list head
+ * @list: the head for your lock-less list
+ */
+static inline void init_llist_head(struct llist_head *list)
+{
+ list->first = NULL;
+}
+
+/**
+ * llist_entry - get the struct of this entry
+ * @ptr: the &struct llist_node pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the llist_node within the struct.
+ */
+#define llist_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * llist_for_each - iterate over some deleted entries of a lock-less list
+ * @pos: the &struct llist_node to use as a loop cursor
+ * @node: the first entry of deleted list entries
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being deleted from the list, so start with an
+ * entry instead of the list head.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry. If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each(pos, node) \
+ for ((pos) = (node); pos; (pos) = (pos)->next)
+
+/**
+ * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @node: the first entry of deleted list entries.
+ * @member: the name of the llist_node within the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from the list, so start with an
+ * entry instead of the list head.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry. If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_entry(pos, node, member) \
+ for ((pos) = llist_entry((node), typeof(*(pos)), member); \
+ &(pos)->member != NULL; \
+ (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * llist_empty - tests whether a lock-less list is empty
+ * @head: the list to test
+ *
+ * Not guaranteed to be accurate or up to date. Just a quick way to
+ * test whether the list is empty without deleting something from the
+ * list.
+ */
+static inline int llist_empty(const struct llist_head *head)
+{
+ return ACCESS_ONCE(head->first) == NULL;
+}
+
+void llist_add(struct llist_node *new, struct llist_head *head);
+void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+ struct llist_head *head);
+struct llist_node *llist_del_first(struct llist_head *head);
+struct llist_node *llist_del_all(struct llist_head *head);
+#endif /* LLIST_H */
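A multi-producer/single-consumer sketch (the item structure and names are made up); note
the newest-first traversal order described above:

#include <linux/llist.h>
#include <linux/kernel.h>

struct example_item {
	int value;
	struct llist_node node;
};

static LLIST_HEAD(example_list);

/* Producers: safe from any context, no lock required. */
static void example_produce(struct example_item *item)
{
	llist_add(&item->node, &example_list);
}

/* Single consumer: detach everything, then traverse the now-private list. */
static void example_consume(void)
{
	struct llist_node *entries = llist_del_all(&example_list);
	struct example_item *item;

	llist_for_each_entry(item, entries, node)
		pr_info("example: got %d\n", item->value);
}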
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b9660078691..3b535db00a9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -86,8 +86,6 @@ extern void mem_cgroup_uncharge_end(void);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern int mem_cgroup_shmem_charge_fallback(struct page *page,
- struct mm_struct *mm, gfp_t gfp_mask);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
@@ -225,12 +223,6 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}
-static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
- struct mm_struct *mm, gfp_t gfp_mask)
-{
- return 0;
-}
-
static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}
diff --git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h
new file mode 100644
index 00000000000..f7316c29bde
--- /dev/null
+++ b/include/linux/mfd/aat2870.h
@@ -0,0 +1,181 @@
+/*
+ * linux/include/linux/mfd/aat2870.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_MFD_AAT2870_H
+#define __LINUX_MFD_AAT2870_H
+
+#include <linux/debugfs.h>
+#include <linux/i2c.h>
+
+/* Register offsets */
+#define AAT2870_BL_CH_EN 0x00
+#define AAT2870_BLM 0x01
+#define AAT2870_BLS 0x02
+#define AAT2870_BL1 0x03
+#define AAT2870_BL2 0x04
+#define AAT2870_BL3 0x05
+#define AAT2870_BL4 0x06
+#define AAT2870_BL5 0x07
+#define AAT2870_BL6 0x08
+#define AAT2870_BL7 0x09
+#define AAT2870_BL8 0x0A
+#define AAT2870_FLR 0x0B
+#define AAT2870_FM 0x0C
+#define AAT2870_FS 0x0D
+#define AAT2870_ALS_CFG0 0x0E
+#define AAT2870_ALS_CFG1 0x0F
+#define AAT2870_ALS_CFG2 0x10
+#define AAT2870_AMB 0x11
+#define AAT2870_ALS0 0x12
+#define AAT2870_ALS1 0x13
+#define AAT2870_ALS2 0x14
+#define AAT2870_ALS3 0x15
+#define AAT2870_ALS4 0x16
+#define AAT2870_ALS5 0x17
+#define AAT2870_ALS6 0x18
+#define AAT2870_ALS7 0x19
+#define AAT2870_ALS8 0x1A
+#define AAT2870_ALS9 0x1B
+#define AAT2870_ALSA 0x1C
+#define AAT2870_ALSB 0x1D
+#define AAT2870_ALSC 0x1E
+#define AAT2870_ALSD 0x1F
+#define AAT2870_ALSE 0x20
+#define AAT2870_ALSF 0x21
+#define AAT2870_SUB_SET 0x22
+#define AAT2870_SUB_CTRL 0x23
+#define AAT2870_LDO_AB 0x24
+#define AAT2870_LDO_CD 0x25
+#define AAT2870_LDO_EN 0x26
+#define AAT2870_REG_NUM 0x27
+
+/* Device IDs */
+enum aat2870_id {
+ AAT2870_ID_BL,
+ AAT2870_ID_LDOA,
+ AAT2870_ID_LDOB,
+ AAT2870_ID_LDOC,
+ AAT2870_ID_LDOD
+};
+
+/* Backlight channels */
+#define AAT2870_BL_CH1 0x01
+#define AAT2870_BL_CH2 0x02
+#define AAT2870_BL_CH3 0x04
+#define AAT2870_BL_CH4 0x08
+#define AAT2870_BL_CH5 0x10
+#define AAT2870_BL_CH6 0x20
+#define AAT2870_BL_CH7 0x40
+#define AAT2870_BL_CH8 0x80
+#define AAT2870_BL_CH_ALL 0xFF
+
+/* Backlight current magnitude (mA) */
+enum aat2870_current {
+ AAT2870_CURRENT_0_45 = 1,
+ AAT2870_CURRENT_0_90,
+ AAT2870_CURRENT_1_80,
+ AAT2870_CURRENT_2_70,
+ AAT2870_CURRENT_3_60,
+ AAT2870_CURRENT_4_50,
+ AAT2870_CURRENT_5_40,
+ AAT2870_CURRENT_6_30,
+ AAT2870_CURRENT_7_20,
+ AAT2870_CURRENT_8_10,
+ AAT2870_CURRENT_9_00,
+ AAT2870_CURRENT_9_90,
+ AAT2870_CURRENT_10_8,
+ AAT2870_CURRENT_11_7,
+ AAT2870_CURRENT_12_6,
+ AAT2870_CURRENT_13_5,
+ AAT2870_CURRENT_14_4,
+ AAT2870_CURRENT_15_3,
+ AAT2870_CURRENT_16_2,
+ AAT2870_CURRENT_17_1,
+ AAT2870_CURRENT_18_0,
+ AAT2870_CURRENT_18_9,
+ AAT2870_CURRENT_19_8,
+ AAT2870_CURRENT_20_7,
+ AAT2870_CURRENT_21_6,
+ AAT2870_CURRENT_22_5,
+ AAT2870_CURRENT_23_4,
+ AAT2870_CURRENT_24_3,
+ AAT2870_CURRENT_25_2,
+ AAT2870_CURRENT_26_1,
+ AAT2870_CURRENT_27_0,
+ AAT2870_CURRENT_27_9
+};
+
+struct aat2870_register {
+ bool readable;
+ bool writeable;
+ u8 value;
+};
+
+struct aat2870_data {
+ struct device *dev;
+ struct i2c_client *client;
+
+ struct mutex io_lock;
+ struct aat2870_register *reg_cache; /* register cache */
+ int en_pin; /* enable GPIO pin (if < 0, ignore this value) */
+ bool is_enable;
+
+ /* platform-specific init and uninit */
+ int (*init)(struct aat2870_data *aat2870);
+ void (*uninit)(struct aat2870_data *aat2870);
+
+ /* i2c io functions */
+ int (*read)(struct aat2870_data *aat2870, u8 addr, u8 *val);
+ int (*write)(struct aat2870_data *aat2870, u8 addr, u8 val);
+ int (*update)(struct aat2870_data *aat2870, u8 addr, u8 mask, u8 val);
+
+ /* for debugfs */
+ struct dentry *dentry_root;
+ struct dentry *dentry_reg;
+};
+
+struct aat2870_subdev_info {
+ int id;
+ const char *name;
+ void *platform_data;
+};
+
+struct aat2870_platform_data {
+ int en_pin; /* enable GPIO pin (if < 0, ignore this value) */
+
+ struct aat2870_subdev_info *subdevs;
+ int num_subdevs;
+
+ /* platform-specific init and uninit */
+ int (*init)(struct aat2870_data *aat2870);
+ void (*uninit)(struct aat2870_data *aat2870);
+};
+
+struct aat2870_bl_platform_data {
+ /* backlight channels, default is AAT2870_BL_CH_ALL */
+ int channels;
+ /* backlight current magnitude, default is AAT2870_CURRENT_27_9 */
+ int max_current;
+ /* maximum brightness, default is 255 */
+ int max_brightness;
+};
+
+#endif /* __LINUX_MFD_AAT2870_H */
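Board-code sketch for wiring up the backlight subdevice (the cell name, GPIO choice and
values are illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/mfd/aat2870.h>

static struct aat2870_bl_platform_data example_bl_pdata = {
	.channels	= AAT2870_BL_CH_ALL,
	.max_current	= AAT2870_CURRENT_27_9,
	.max_brightness	= 255,
};

static struct aat2870_subdev_info example_aat2870_subdevs[] = {
	{
		.id		= AAT2870_ID_BL,
		.name		= "aat2870-backlight",	/* illustrative cell name */
		.platform_data	= &example_bl_pdata,
	},
};

static struct aat2870_platform_data example_aat2870_pdata = {
	.en_pin		= -1,	/* negative value: no enable GPIO on this board */
	.subdevs	= example_aat2870_subdevs,
	.num_subdevs	= ARRAY_SIZE(example_aat2870_subdevs),
};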
diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h
index b3184307519..838c6b487cc 100644
--- a/include/linux/mfd/ab8500.h
+++ b/include/linux/mfd/ab8500.h
@@ -28,6 +28,7 @@
#define AB8500_INTERRUPT 0xE
#define AB8500_RTC 0xF
#define AB8500_MISC 0x10
+#define AB8500_DEVELOPMENT 0x11
#define AB8500_DEBUG 0x12
#define AB8500_PROD_TEST 0x13
#define AB8500_OTP_EMUL 0x15
@@ -74,13 +75,6 @@
#define AB8500_INT_ACC_DETECT_21DB_F 37
#define AB8500_INT_ACC_DETECT_21DB_R 38
#define AB8500_INT_GP_SW_ADC_CONV_END 39
-#define AB8500_INT_ACC_DETECT_1DB_F 33
-#define AB8500_INT_ACC_DETECT_1DB_R 34
-#define AB8500_INT_ACC_DETECT_22DB_F 35
-#define AB8500_INT_ACC_DETECT_22DB_R 36
-#define AB8500_INT_ACC_DETECT_21DB_F 37
-#define AB8500_INT_ACC_DETECT_21DB_R 38
-#define AB8500_INT_GP_SW_ADC_CONV_END 39
#define AB8500_INT_GPIO6R 40
#define AB8500_INT_GPIO7R 41
#define AB8500_INT_GPIO8R 42
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index 60931d08942..0bbd13dbe33 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -107,11 +107,16 @@ struct max8997_platform_data {
unsigned int buck5_voltage[8];
bool buck5_gpiodvs;
+ /* ---- Charger control ---- */
+ /* eoc stands for 'end of charge' */
+ int eoc_mA; /* 50 ~ 200mA by 10mA step */
+ /* charge Full Timeout */
+ int timeout; /* 0 (no timeout), 5, 6, 7 hours */
+
/* MUIC: Not implemented */
/* HAPTIC: Not implemented */
/* RTC: Not implemented */
/* Flash: Not implemented */
- /* Charger control: Not implemented */
};
#endif /* __LINUX_MFD_MAX8998_H */
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index 61daa167b57..f4f0dfa4698 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -87,6 +87,15 @@ struct max8998_regulator_data {
* @wakeup: Allow to wake up from suspend
* @rtc_delay: LP3974 RTC chip bug that requires delay after a register
* write before reading it.
+ * @eoc: End of Charge Level in percent: 10% ~ 45% by 5% step
+ * If it equals 0, leave it unchanged.
+ * Otherwise, it is an invalid value.
+ * @restart: Restart Level in mV: 100, 150, 200, and -1 for disable.
+ * If it equals 0, leave it unchanged.
+ * Otherwise, it is an invalid value.
+ * @timeout: Full Timeout in hours: 5, 6, 7, and -1 for disable.
+ * If it equals 0, leave it unchanged.
+ * Otherwise, it is an invalid value.
*/
struct max8998_platform_data {
struct max8998_regulator_data *regulators;
@@ -107,6 +116,9 @@ struct max8998_platform_data {
int buck2_default_idx;
bool wakeup;
bool rtc_delay;
+ int eoc;
+ int restart;
+ int timeout;
};
#endif /* __LINUX_MFD_MAX8998_H */
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index e762c270d8d..be1af7c42e5 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -57,6 +57,7 @@ struct stmpe_variant_info;
* @irq_lock: IRQ bus lock
* @dev: device, mostly for dev_dbg()
* @i2c: i2c client
+ * @partnum: part number
* @variant: the detected STMPE model number
* @regs: list of addresses of registers which are at different addresses on
* different variants. Indexed by one of STMPE_IDX_*.
@@ -121,6 +122,8 @@ struct stmpe_keypad_platform_data {
* @norequest_mask: bitmask specifying which GPIOs should _not_ be
* requestable due to different usage (e.g. touch, keypad)
* STMPE_GPIO_NOREQ_* macros can be used here.
+ * @setup: board specific setup callback.
+ * @remove: board specific remove callback
*/
struct stmpe_gpio_platform_data {
int gpio_base;
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 73572c65d04..82b4c8801a4 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -791,6 +791,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base);
int tps65910_irq_init(struct tps65910 *tps65910, int irq,
struct tps65910_platform_data *pdata);
+int tps65910_irq_exit(struct tps65910 *tps65910);
static inline int tps65910_chip_id(struct tps65910 *tps65910)
{
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
new file mode 100644
index 00000000000..aaceab402ec
--- /dev/null
+++ b/include/linux/mfd/tps65912.h
@@ -0,0 +1,327 @@
+/*
+ * tps65912.h -- TI TPS6591x
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_TPS65912_H
+#define __LINUX_MFD_TPS65912_H
+
+/* TPS regulator type list */
+#define REGULATOR_LDO 0
+#define REGULATOR_DCDC 1
+
+/*
+ * List of registers for TPS65912
+ */
+
+#define TPS65912_DCDC1_CTRL 0x00
+#define TPS65912_DCDC2_CTRL 0x01
+#define TPS65912_DCDC3_CTRL 0x02
+#define TPS65912_DCDC4_CTRL 0x03
+#define TPS65912_DCDC1_OP 0x04
+#define TPS65912_DCDC1_AVS 0x05
+#define TPS65912_DCDC1_LIMIT 0x06
+#define TPS65912_DCDC2_OP 0x07
+#define TPS65912_DCDC2_AVS 0x08
+#define TPS65912_DCDC2_LIMIT 0x09
+#define TPS65912_DCDC3_OP 0x0A
+#define TPS65912_DCDC3_AVS 0x0B
+#define TPS65912_DCDC3_LIMIT 0x0C
+#define TPS65912_DCDC4_OP 0x0D
+#define TPS65912_DCDC4_AVS 0x0E
+#define TPS65912_DCDC4_LIMIT 0x0F
+#define TPS65912_LDO1_OP 0x10
+#define TPS65912_LDO1_AVS 0x11
+#define TPS65912_LDO1_LIMIT 0x12
+#define TPS65912_LDO2_OP 0x13
+#define TPS65912_LDO2_AVS 0x14
+#define TPS65912_LDO2_LIMIT 0x15
+#define TPS65912_LDO3_OP 0x16
+#define TPS65912_LDO3_AVS 0x17
+#define TPS65912_LDO3_LIMIT 0x18
+#define TPS65912_LDO4_OP 0x19
+#define TPS65912_LDO4_AVS 0x1A
+#define TPS65912_LDO4_LIMIT 0x1B
+#define TPS65912_LDO5 0x1C
+#define TPS65912_LDO6 0x1D
+#define TPS65912_LDO7 0x1E
+#define TPS65912_LDO8 0x1F
+#define TPS65912_LDO9 0x20
+#define TPS65912_LDO10 0x21
+#define TPS65912_THRM 0x22
+#define TPS65912_CLK32OUT 0x23
+#define TPS65912_DEVCTRL 0x24
+#define TPS65912_DEVCTRL2 0x25
+#define TPS65912_I2C_SPI_CFG 0x26
+#define TPS65912_KEEP_ON 0x27
+#define TPS65912_KEEP_ON2 0x28
+#define TPS65912_SET_OFF1 0x29
+#define TPS65912_SET_OFF2 0x2A
+#define TPS65912_DEF_VOLT 0x2B
+#define TPS65912_DEF_VOLT_MAPPING 0x2C
+#define TPS65912_DISCHARGE 0x2D
+#define TPS65912_DISCHARGE2 0x2E
+#define TPS65912_EN1_SET1 0x2F
+#define TPS65912_EN1_SET2 0x30
+#define TPS65912_EN2_SET1 0x31
+#define TPS65912_EN2_SET2 0x32
+#define TPS65912_EN3_SET1 0x33
+#define TPS65912_EN3_SET2 0x34
+#define TPS65912_EN4_SET1 0x35
+#define TPS65912_EN4_SET2 0x36
+#define TPS65912_PGOOD 0x37
+#define TPS65912_PGOOD2 0x38
+#define TPS65912_INT_STS 0x39
+#define TPS65912_INT_MSK 0x3A
+#define TPS65912_INT_STS2 0x3B
+#define TPS65912_INT_MSK2 0x3C
+#define TPS65912_INT_STS3 0x3D
+#define TPS65912_INT_MSK3 0x3E
+#define TPS65912_INT_STS4 0x3F
+#define TPS65912_INT_MSK4 0x40
+#define TPS65912_GPIO1 0x41
+#define TPS65912_GPIO2 0x42
+#define TPS65912_GPIO3 0x43
+#define TPS65912_GPIO4 0x44
+#define TPS65912_GPIO5 0x45
+#define TPS65912_VMON 0x46
+#define TPS65912_LEDA_CTRL1 0x47
+#define TPS65912_LEDA_CTRL2 0x48
+#define TPS65912_LEDA_CTRL3 0x49
+#define TPS65912_LEDA_CTRL4 0x4A
+#define TPS65912_LEDA_CTRL5 0x4B
+#define TPS65912_LEDA_CTRL6 0x4C
+#define TPS65912_LEDA_CTRL7 0x4D
+#define TPS65912_LEDA_CTRL8 0x4E
+#define TPS65912_LEDB_CTRL1 0x4F
+#define TPS65912_LEDB_CTRL2 0x50
+#define TPS65912_LEDB_CTRL3 0x51
+#define TPS65912_LEDB_CTRL4 0x52
+#define TPS65912_LEDB_CTRL5 0x53
+#define TPS65912_LEDB_CTRL6 0x54
+#define TPS65912_LEDB_CTRL7 0x55
+#define TPS65912_LEDB_CTRL8 0x56
+#define TPS65912_LEDC_CTRL1 0x57
+#define TPS65912_LEDC_CTRL2 0x58
+#define TPS65912_LEDC_CTRL3 0x59
+#define TPS65912_LEDC_CTRL4 0x5A
+#define TPS65912_LEDC_CTRL5 0x5B
+#define TPS65912_LEDC_CTRL6 0x5C
+#define TPS65912_LEDC_CTRL7 0x5D
+#define TPS65912_LEDC_CTRL8 0x5E
+#define TPS65912_LED_RAMP_UP_TIME 0x5F
+#define TPS65912_LED_RAMP_DOWN_TIME 0x60
+#define TPS65912_LED_SEQ_EN 0x61
+#define TPS65912_LOADSWITCH 0x62
+#define TPS65912_SPARE 0x63
+#define TPS65912_VERNUM 0x64
+#define TPS6591X_MAX_REGISTER 0x64
+
+/* IRQ Definitions */
+#define TPS65912_IRQ_PWRHOLD_F 0
+#define TPS65912_IRQ_VMON 1
+#define TPS65912_IRQ_PWRON 2
+#define TPS65912_IRQ_PWRON_LP 3
+#define TPS65912_IRQ_PWRHOLD_R 4
+#define TPS65912_IRQ_HOTDIE 5
+#define TPS65912_IRQ_GPIO1_R 6
+#define TPS65912_IRQ_GPIO1_F 7
+#define TPS65912_IRQ_GPIO2_R 8
+#define TPS65912_IRQ_GPIO2_F 9
+#define TPS65912_IRQ_GPIO3_R 10
+#define TPS65912_IRQ_GPIO3_F 11
+#define TPS65912_IRQ_GPIO4_R 12
+#define TPS65912_IRQ_GPIO4_F 13
+#define TPS65912_IRQ_GPIO5_R 14
+#define TPS65912_IRQ_GPIO5_F 15
+#define TPS65912_IRQ_PGOOD_DCDC1 16
+#define TPS65912_IRQ_PGOOD_DCDC2 17
+#define TPS65912_IRQ_PGOOD_DCDC3 18
+#define TPS65912_IRQ_PGOOD_DCDC4 19
+#define TPS65912_IRQ_PGOOD_LDO1 20
+#define TPS65912_IRQ_PGOOD_LDO2 21
+#define TPS65912_IRQ_PGOOD_LDO3 22
+#define TPS65912_IRQ_PGOOD_LDO4 23
+#define TPS65912_IRQ_PGOOD_LDO5 24
+#define TPS65912_IRQ_PGOOD_LDO6 25
+#define TPS65912_IRQ_PGOOD_LDO7 26
+#define TPS65912_IRQ_PGOOD_LD08 27
+#define TPS65912_IRQ_PGOOD_LDO9 28
+#define TPS65912_IRQ_PGOOD_LDO10 29
+
+#define TPS65912_NUM_IRQ 30
+
+/* GPIO 1 and 2 Register Definitions */
+#define GPIO_SLEEP_MASK 0x80
+#define GPIO_SLEEP_SHIFT 7
+#define GPIO_DEB_MASK 0x10
+#define GPIO_DEB_SHIFT 4
+#define GPIO_CFG_MASK 0x04
+#define GPIO_CFG_SHIFT 2
+#define GPIO_STS_MASK 0x02
+#define GPIO_STS_SHIFT 1
+#define GPIO_SET_MASK 0x01
+#define GPIO_SET_SHIFT 0
+
+/* GPIO 3 Register Definitions */
+#define GPIO3_SLEEP_MASK 0x80
+#define GPIO3_SLEEP_SHIFT 7
+#define GPIO3_SEL_MASK 0x40
+#define GPIO3_SEL_SHIFT 6
+#define GPIO3_ODEN_MASK 0x20
+#define GPIO3_ODEN_SHIFT 5
+#define GPIO3_DEB_MASK 0x10
+#define GPIO3_DEB_SHIFT 4
+#define GPIO3_PDEN_MASK 0x08
+#define GPIO3_PDEN_SHIFT 3
+#define GPIO3_CFG_MASK 0x04
+#define GPIO3_CFG_SHIFT 2
+#define GPIO3_STS_MASK 0x02
+#define GPIO3_STS_SHIFT 1
+#define GPIO3_SET_MASK 0x01
+#define GPIO3_SET_SHIFT 0
+
+/* GPIO 4 Register Definitions */
+#define GPIO4_SLEEP_MASK 0x80
+#define GPIO4_SLEEP_SHIFT 7
+#define GPIO4_SEL_MASK 0x40
+#define GPIO4_SEL_SHIFT 6
+#define GPIO4_ODEN_MASK 0x20
+#define GPIO4_ODEN_SHIFT 5
+#define GPIO4_DEB_MASK 0x10
+#define GPIO4_DEB_SHIFT 4
+#define GPIO4_PDEN_MASK 0x08
+#define GPIO4_PDEN_SHIFT 3
+#define GPIO4_CFG_MASK 0x04
+#define GPIO4_CFG_SHIFT 2
+#define GPIO4_STS_MASK 0x02
+#define GPIO4_STS_SHIFT 1
+#define GPIO4_SET_MASK 0x01
+#define GPIO4_SET_SHIFT 0
+
+/* Register THERM (0x80) register.RegisterDescription */
+#define THERM_THERM_HD_MASK 0x20
+#define THERM_THERM_HD_SHIFT 5
+#define THERM_THERM_TS_MASK 0x10
+#define THERM_THERM_TS_SHIFT 4
+#define THERM_THERM_HDSEL_MASK 0x0C
+#define THERM_THERM_HDSEL_SHIFT 2
+#define THERM_RSVD1_MASK 0x02
+#define THERM_RSVD1_SHIFT 1
+#define THERM_THERM_STATE_MASK 0x01
+#define THERM_THERM_STATE_SHIFT 0
+
+/* Register DCDCCTRL1 register.RegisterDescription */
+#define DCDCCTRL_VCON_ENABLE_MASK 0x80
+#define DCDCCTRL_VCON_ENABLE_SHIFT 7
+#define DCDCCTRL_VCON_RANGE1_MASK 0x40
+#define DCDCCTRL_VCON_RANGE1_SHIFT 6
+#define DCDCCTRL_VCON_RANGE0_MASK 0x20
+#define DCDCCTRL_VCON_RANGE0_SHIFT 5
+#define DCDCCTRL_TSTEP2_MASK 0x10
+#define DCDCCTRL_TSTEP2_SHIFT 4
+#define DCDCCTRL_TSTEP1_MASK 0x08
+#define DCDCCTRL_TSTEP1_SHIFT 3
+#define DCDCCTRL_TSTEP0_MASK 0x04
+#define DCDCCTRL_TSTEP0_SHIFT 2
+#define DCDCCTRL_DCDC1_MODE_MASK 0x02
+#define DCDCCTRL_DCDC1_MODE_SHIFT 1
+
+/* Register DCDCCTRL2 and DCDCCTRL3 register.RegisterDescription */
+#define DCDCCTRL_TSTEP2_MASK 0x10
+#define DCDCCTRL_TSTEP2_SHIFT 4
+#define DCDCCTRL_TSTEP1_MASK 0x08
+#define DCDCCTRL_TSTEP1_SHIFT 3
+#define DCDCCTRL_TSTEP0_MASK 0x04
+#define DCDCCTRL_TSTEP0_SHIFT 2
+#define DCDCCTRL_DCDC_MODE_MASK 0x02
+#define DCDCCTRL_DCDC_MODE_SHIFT 1
+#define DCDCCTRL_RSVD0_MASK 0x01
+#define DCDCCTRL_RSVD0_SHIFT 0
+
+/* Register DCDCCTRL4 register.RegisterDescription */
+#define DCDCCTRL_RAMP_TIME_MASK 0x01
+#define DCDCCTRL_RAMP_TIME_SHIFT 0
+
+/* Register DCDCx_AVS */
+#define DCDC_AVS_ENABLE_MASK 0x80
+#define DCDC_AVS_ENABLE_SHIFT 7
+#define DCDC_AVS_ECO_MASK 0x40
+#define DCDC_AVS_ECO_SHIFT 6
+
+/* Register DCDCx_LIMIT */
+#define DCDC_LIMIT_RANGE_MASK 0xC0
+#define DCDC_LIMIT_RANGE_SHIFT 6
+#define DCDC_LIMIT_MAX_SEL_MASK 0x3F
+#define DCDC_LIMIT_MAX_SEL_SHIFT 0
+
+/**
+ * struct tps65912_board
+ * Board platform data may be used to initialize regulators.
+ */
+struct tps65912_board {
+ int is_dcdc1_avs;
+ int is_dcdc2_avs;
+ int is_dcdc3_avs;
+ int is_dcdc4_avs;
+ int irq;
+ int irq_base;
+ int gpio_base;
+ struct regulator_init_data *tps65912_pmic_init_data;
+};
+
+/**
+ * struct tps65912 - tps65912 sub-driver chip access routines
+ */
+
+struct tps65912 {
+ struct device *dev;
+ /* for read/write access */
+ struct mutex io_mutex;
+
+ /* For device IO interfaces: I2C or SPI */
+ void *control_data;
+
+ int (*read)(struct tps65912 *tps65912, u8 reg, int size, void *dest);
+ int (*write)(struct tps65912 *tps65912, u8 reg, int size, void *src);
+
+ /* Client devices */
+ struct tps65912_pmic *pmic;
+
+ /* GPIO Handling */
+ struct gpio_chip gpio;
+
+ /* IRQ Handling */
+ struct mutex irq_lock;
+ int chip_irq;
+ int irq_base;
+ int irq_num;
+ u32 irq_mask;
+};
+
+struct tps65912_platform_data {
+ int irq;
+ int irq_base;
+};
+
+unsigned int tps_chip(void);
+
+int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask);
+int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask);
+int tps65912_reg_read(struct tps65912 *tps65912, u8 reg);
+int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val);
+int tps65912_device_init(struct tps65912 *tps65912);
+void tps65912_device_exit(struct tps65912 *tps65912);
+int tps65912_irq_init(struct tps65912 *tps65912, int irq,
+ struct tps65912_platform_data *pdata);
+
+#endif /* __LINUX_MFD_TPS65912_H */
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 0d515ee1c24..8dda8ded5cd 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
+#include <linux/list.h>
/*
* Register values.
@@ -234,9 +235,111 @@
#define WM831X_ON_PIN_TO_SHIFT 0 /* ON_PIN_TO - [1:0] */
#define WM831X_ON_PIN_TO_WIDTH 2 /* ON_PIN_TO - [1:0] */
+/*
+ * R16528 (0x4090) - Clock Control 1
+ */
+#define WM831X_CLKOUT_ENA 0x8000 /* CLKOUT_ENA */
+#define WM831X_CLKOUT_ENA_MASK 0x8000 /* CLKOUT_ENA */
+#define WM831X_CLKOUT_ENA_SHIFT 15 /* CLKOUT_ENA */
+#define WM831X_CLKOUT_ENA_WIDTH 1 /* CLKOUT_ENA */
+#define WM831X_CLKOUT_OD 0x2000 /* CLKOUT_OD */
+#define WM831X_CLKOUT_OD_MASK 0x2000 /* CLKOUT_OD */
+#define WM831X_CLKOUT_OD_SHIFT 13 /* CLKOUT_OD */
+#define WM831X_CLKOUT_OD_WIDTH 1 /* CLKOUT_OD */
+#define WM831X_CLKOUT_SLOT_MASK 0x0700 /* CLKOUT_SLOT - [10:8] */
+#define WM831X_CLKOUT_SLOT_SHIFT 8 /* CLKOUT_SLOT - [10:8] */
+#define WM831X_CLKOUT_SLOT_WIDTH 3 /* CLKOUT_SLOT - [10:8] */
+#define WM831X_CLKOUT_SLPSLOT_MASK 0x0070 /* CLKOUT_SLPSLOT - [6:4] */
+#define WM831X_CLKOUT_SLPSLOT_SHIFT 4 /* CLKOUT_SLPSLOT - [6:4] */
+#define WM831X_CLKOUT_SLPSLOT_WIDTH 3 /* CLKOUT_SLPSLOT - [6:4] */
+#define WM831X_CLKOUT_SRC 0x0001 /* CLKOUT_SRC */
+#define WM831X_CLKOUT_SRC_MASK 0x0001 /* CLKOUT_SRC */
+#define WM831X_CLKOUT_SRC_SHIFT 0 /* CLKOUT_SRC */
+#define WM831X_CLKOUT_SRC_WIDTH 1 /* CLKOUT_SRC */
+
+/*
+ * R16529 (0x4091) - Clock Control 2
+ */
+#define WM831X_XTAL_INH 0x8000 /* XTAL_INH */
+#define WM831X_XTAL_INH_MASK 0x8000 /* XTAL_INH */
+#define WM831X_XTAL_INH_SHIFT 15 /* XTAL_INH */
+#define WM831X_XTAL_INH_WIDTH 1 /* XTAL_INH */
+#define WM831X_XTAL_ENA 0x2000 /* XTAL_ENA */
+#define WM831X_XTAL_ENA_MASK 0x2000 /* XTAL_ENA */
+#define WM831X_XTAL_ENA_SHIFT 13 /* XTAL_ENA */
+#define WM831X_XTAL_ENA_WIDTH 1 /* XTAL_ENA */
+#define WM831X_XTAL_BKUPENA 0x1000 /* XTAL_BKUPENA */
+#define WM831X_XTAL_BKUPENA_MASK 0x1000 /* XTAL_BKUPENA */
+#define WM831X_XTAL_BKUPENA_SHIFT 12 /* XTAL_BKUPENA */
+#define WM831X_XTAL_BKUPENA_WIDTH 1 /* XTAL_BKUPENA */
+#define WM831X_FLL_AUTO 0x0080 /* FLL_AUTO */
+#define WM831X_FLL_AUTO_MASK 0x0080 /* FLL_AUTO */
+#define WM831X_FLL_AUTO_SHIFT 7 /* FLL_AUTO */
+#define WM831X_FLL_AUTO_WIDTH 1 /* FLL_AUTO */
+#define WM831X_FLL_AUTO_FREQ_MASK 0x0007 /* FLL_AUTO_FREQ - [2:0] */
+#define WM831X_FLL_AUTO_FREQ_SHIFT 0 /* FLL_AUTO_FREQ - [2:0] */
+#define WM831X_FLL_AUTO_FREQ_WIDTH 3 /* FLL_AUTO_FREQ - [2:0] */
+
+/*
+ * R16530 (0x4092) - FLL Control 1
+ */
+#define WM831X_FLL_FRAC 0x0004 /* FLL_FRAC */
+#define WM831X_FLL_FRAC_MASK 0x0004 /* FLL_FRAC */
+#define WM831X_FLL_FRAC_SHIFT 2 /* FLL_FRAC */
+#define WM831X_FLL_FRAC_WIDTH 1 /* FLL_FRAC */
+#define WM831X_FLL_OSC_ENA 0x0002 /* FLL_OSC_ENA */
+#define WM831X_FLL_OSC_ENA_MASK 0x0002 /* FLL_OSC_ENA */
+#define WM831X_FLL_OSC_ENA_SHIFT 1 /* FLL_OSC_ENA */
+#define WM831X_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */
+#define WM831X_FLL_ENA 0x0001 /* FLL_ENA */
+#define WM831X_FLL_ENA_MASK 0x0001 /* FLL_ENA */
+#define WM831X_FLL_ENA_SHIFT 0 /* FLL_ENA */
+#define WM831X_FLL_ENA_WIDTH 1 /* FLL_ENA */
+
+/*
+ * R16531 (0x4093) - FLL Control 2
+ */
+#define WM831X_FLL_OUTDIV_MASK 0x3F00 /* FLL_OUTDIV - [13:8] */
+#define WM831X_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [13:8] */
+#define WM831X_FLL_OUTDIV_WIDTH 6 /* FLL_OUTDIV - [13:8] */
+#define WM831X_FLL_CTRL_RATE_MASK 0x0070 /* FLL_CTRL_RATE - [6:4] */
+#define WM831X_FLL_CTRL_RATE_SHIFT 4 /* FLL_CTRL_RATE - [6:4] */
+#define WM831X_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [6:4] */
+#define WM831X_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */
+#define WM831X_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */
+#define WM831X_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */
+
+/*
+ * R16532 (0x4094) - FLL Control 3
+ */
+#define WM831X_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */
+#define WM831X_FLL_K_SHIFT 0 /* FLL_K - [15:0] */
+#define WM831X_FLL_K_WIDTH 16 /* FLL_K - [15:0] */
+
+/*
+ * R16533 (0x4095) - FLL Control 4
+ */
+#define WM831X_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */
+#define WM831X_FLL_N_SHIFT 5 /* FLL_N - [14:5] */
+#define WM831X_FLL_N_WIDTH 10 /* FLL_N - [14:5] */
+#define WM831X_FLL_GAIN_MASK 0x000F /* FLL_GAIN - [3:0] */
+#define WM831X_FLL_GAIN_SHIFT 0 /* FLL_GAIN - [3:0] */
+#define WM831X_FLL_GAIN_WIDTH 4 /* FLL_GAIN - [3:0] */
+
+/*
+ * R16534 (0x4096) - FLL Control 5
+ */
+#define WM831X_FLL_CLK_REF_DIV_MASK 0x0018 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM831X_FLL_CLK_REF_DIV_SHIFT 3 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM831X_FLL_CLK_REF_DIV_WIDTH 2 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM831X_FLL_CLK_SRC_MASK 0x0003 /* FLL_CLK_SRC - [1:0] */
+#define WM831X_FLL_CLK_SRC_SHIFT 0 /* FLL_CLK_SRC - [1:0] */
+#define WM831X_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */
+
struct regulator_dev;
#define WM831X_NUM_IRQ_REGS 5
+#define WM831X_NUM_GPIO_REGS 16
enum wm831x_parent {
WM8310 = 0x8310,
@@ -248,6 +351,12 @@ enum wm831x_parent {
WM8326 = 0x8326,
};
+struct wm831x;
+enum wm831x_auxadc;
+
+typedef int (*wm831x_auxadc_read_fn)(struct wm831x *wm831x,
+ enum wm831x_auxadc input);
+
struct wm831x {
struct mutex io_lock;
@@ -261,7 +370,7 @@ struct wm831x {
int irq; /* Our chip IRQ */
struct mutex irq_lock;
- unsigned int irq_base;
+ int irq_base;
int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
@@ -272,8 +381,13 @@ struct wm831x {
int num_gpio;
+ /* Used by the interrupt controller code to post writes */
+ int gpio_update[WM831X_NUM_GPIO_REGS];
+
struct mutex auxadc_lock;
- struct completion auxadc_done;
+ struct list_head auxadc_pending;
+ u16 auxadc_active;
+ wm831x_auxadc_read_fn auxadc_read;
/* The WM831x has a security key blocking access to certain
* registers. The mutex is taken by the accessors for locking
@@ -300,5 +414,6 @@ void wm831x_device_exit(struct wm831x *wm831x);
int wm831x_device_suspend(struct wm831x *wm831x);
int wm831x_irq_init(struct wm831x *wm831x, int irq);
void wm831x_irq_exit(struct wm831x *wm831x);
+void wm831x_auxadc_init(struct wm831x *wm831x);
#endif
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index ff42d700293..0ba24599fe5 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -120,6 +120,9 @@ struct wm831x_pdata {
/** Put the /IRQ line into CMOS mode */
bool irq_cmos;
+ /** Disable the touchscreen */
+ bool disable_touch;
+
int irq_base;
int gpio_base;
int gpio_defaults[WM831X_GPIO_NUM];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3172a1c0f08..f2690cf4982 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1600,6 +1600,7 @@ enum mf_flags {
};
extern void memory_failure(unsigned long pfn, int trapno);
extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index f387919bbc5..8c6ee44914c 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -29,6 +29,8 @@
#define NFS_MNT_VERSION 1
#define NFS_MNT3_VERSION 3
+#define NFS_PIPE_DIRNAME "/nfs"
+
/*
* NFS stats. The good thing with these values is that NFSv3 errors are
* a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index a3c4bc800dc..76f99e8714f 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -566,6 +566,7 @@ enum {
NFSPROC4_CLNT_SECINFO_NO_NAME,
NFSPROC4_CLNT_TEST_STATEID,
NFSPROC4_CLNT_FREE_STATEID,
+ NFSPROC4_CLNT_GETDEVICELIST,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 8b579beb635..eaac770f886 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -99,9 +99,10 @@ struct nfs_open_context {
struct nfs_open_dir_context {
struct rpc_cred *cred;
+ unsigned long attr_gencount;
__u64 dir_cookie;
__u64 dup_cookie;
- int duped;
+ signed char duped;
};
/*
@@ -568,12 +569,12 @@ extern struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type);
extern int nfs3_proc_setacl(struct inode *inode, int type,
struct posix_acl *acl);
extern int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
- mode_t mode);
+ umode_t mode);
extern void nfs3_forget_cached_acls(struct inode *inode);
#else
static inline int nfs3_proc_set_default_acl(struct inode *dir,
struct inode *inode,
- mode_t mode)
+ umode_t mode)
{
return 0;
}
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 50a661f8b45..b5479df8378 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -131,8 +131,9 @@ struct nfs_server {
struct fscache_cookie *fscache; /* superblock cookie */
#endif
+ u32 pnfs_blksize; /* layout_blksize attr */
#ifdef CONFIG_NFS_V4
- u32 attr_bitmask[2];/* V4 bitmask representing the set
+ u32 attr_bitmask[3];/* V4 bitmask representing the set
of attributes supported on this
filesystem */
u32 cache_consistency_bitmask[2];
@@ -145,6 +146,7 @@ struct nfs_server {
filesystem */
struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */
struct rpc_wait_queue roc_rpcwaitq;
+ void *pnfs_ld_data; /* per mount point data */
/* the following fields are protected by nfs_client->cl_lock */
struct rb_root state_owners;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 5b115956aba..abd615d74a2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -122,6 +122,7 @@ struct nfs_fsinfo {
struct timespec time_delta; /* server time granularity */
__u32 lease_time; /* in seconds */
__u32 layouttype; /* supported pnfs layout driver */
+ __u32 blksize; /* preferred pnfs io block size */
};
struct nfs_fsstat {
@@ -235,6 +236,17 @@ struct nfs4_layoutget {
gfp_t gfp_flags;
};
+struct nfs4_getdevicelist_args {
+ const struct nfs_fh *fh;
+ u32 layoutclass;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_getdevicelist_res {
+ struct pnfs_devicelist *devlist;
+ struct nfs4_sequence_res seq_res;
+};
+
struct nfs4_getdeviceinfo_args {
struct pnfs_device *pdev;
struct nfs4_sequence_args seq_args;
@@ -257,12 +269,13 @@ struct nfs4_layoutcommit_res {
struct nfs_fattr *fattr;
const struct nfs_server *server;
struct nfs4_sequence_res seq_res;
+ int status;
};
struct nfs4_layoutcommit_data {
struct rpc_task task;
struct nfs_fattr fattr;
- struct pnfs_layout_segment *lseg;
+ struct list_head lseg_list;
struct rpc_cred *cred;
struct nfs4_layoutcommit_args args;
struct nfs4_layoutcommit_res res;
@@ -760,6 +773,11 @@ struct nfs3_getaclres {
struct posix_acl * acl_default;
};
+struct nfs4_string {
+ unsigned int len;
+ char *data;
+};
+
#ifdef CONFIG_NFS_V4
typedef u64 clientid4;
@@ -943,18 +961,13 @@ struct nfs4_server_caps_arg {
};
struct nfs4_server_caps_res {
- u32 attr_bitmask[2];
+ u32 attr_bitmask[3];
u32 acl_bitmask;
u32 has_links;
u32 has_symlinks;
struct nfs4_sequence_res seq_res;
};
-struct nfs4_string {
- unsigned int len;
- char *data;
-};
-
#define NFS4_PATHNAME_MAXCOMPONENTS 512
struct nfs4_pathname {
unsigned int ncomponents;
diff --git a/include/linux/of.h b/include/linux/of.h
index bd716f8908d..0085bb01c04 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -196,12 +196,13 @@ extern struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp);
extern int of_property_read_u32_array(const struct device_node *np,
- char *propname,
+ const char *propname,
u32 *out_values,
size_t sz);
-extern int of_property_read_string(struct device_node *np, char *propname,
- const char **out_string);
+extern int of_property_read_string(struct device_node *np,
+ const char *propname,
+ const char **out_string);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
extern int of_device_is_available(const struct device_node *device);
@@ -242,13 +243,15 @@ static inline bool of_have_populated_dt(void)
}
static inline int of_property_read_u32_array(const struct device_node *np,
- char *propname, u32 *out_values, size_t sz)
+ const char *propname,
+ u32 *out_values, size_t sz)
{
return -ENOSYS;
}
static inline int of_property_read_string(struct device_node *np,
- char *propname, const char **out_string)
+ const char *propname,
+ const char **out_string)
{
return -ENOSYS;
}
@@ -256,7 +259,7 @@ static inline int of_property_read_string(struct device_node *np,
#endif /* CONFIG_OF */
static inline int of_property_read_u32(const struct device_node *np,
- char *propname,
+ const char *propname,
u32 *out_value)
{
return of_property_read_u32_array(np, propname, out_value, 1);
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index aec8025c786..52280a2b5e6 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -57,6 +57,8 @@ extern int of_mm_gpiochip_add(struct device_node *np,
extern void of_gpiochip_add(struct gpio_chip *gc);
extern void of_gpiochip_remove(struct gpio_chip *gc);
extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np);
+extern int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
+ const void *gpio_spec, u32 *flags);
#else /* CONFIG_OF_GPIO */
@@ -72,6 +74,13 @@ static inline unsigned int of_gpio_count(struct device_node *np)
return 0;
}
+static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
+ struct device_node *np,
+ const void *gpio_spec, u32 *flags)
+{
+ return -ENOSYS;
+}
+
static inline void of_gpiochip_add(struct gpio_chip *gc) { }
static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index b00c4ec5056..ae96bbe5451 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2709,6 +2709,16 @@
#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB3 0x3c23
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB4 0x3c24
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB5 0x3c25
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB6 0x3c26
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 9a53b99818e..b7681102a4b 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -9,6 +9,7 @@
#define __LINUX_POSIX_ACL_H
#include <linux/slab.h>
+#include <linux/rcupdate.h>
#define ACL_UNDEFINED_ID (-1)
@@ -38,7 +39,10 @@ struct posix_acl_entry {
};
struct posix_acl {
- atomic_t a_refcount;
+ union {
+ atomic_t a_refcount;
+ struct rcu_head a_rcu;
+ };
unsigned int a_count;
struct posix_acl_entry a_entries[0];
};
@@ -65,7 +69,7 @@ static inline void
posix_acl_release(struct posix_acl *acl)
{
if (acl && atomic_dec_and_test(&acl->a_refcount))
- kfree(acl);
+ kfree_rcu(acl, a_rcu);
}
@@ -75,29 +79,31 @@ extern void posix_acl_init(struct posix_acl *, int);
extern struct posix_acl *posix_acl_alloc(int, gfp_t);
extern int posix_acl_valid(const struct posix_acl *);
extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
-extern struct posix_acl *posix_acl_from_mode(mode_t, gfp_t);
-extern int posix_acl_equiv_mode(const struct posix_acl *, mode_t *);
-extern int posix_acl_create(struct posix_acl **, gfp_t, mode_t *);
-extern int posix_acl_chmod(struct posix_acl **, gfp_t, mode_t);
+extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
+extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
+extern int posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
+extern int posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
extern struct posix_acl *get_posix_acl(struct inode *, int);
extern int set_posix_acl(struct inode *, int, struct posix_acl *);
#ifdef CONFIG_FS_POSIX_ACL
-static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
+static inline struct posix_acl **acl_by_type(struct inode *inode, int type)
{
- struct posix_acl **p, *acl;
switch (type) {
case ACL_TYPE_ACCESS:
- p = &inode->i_acl;
- break;
+ return &inode->i_acl;
case ACL_TYPE_DEFAULT:
- p = &inode->i_default_acl;
- break;
+ return &inode->i_default_acl;
default:
- return ERR_PTR(-EINVAL);
+ BUG();
}
- acl = ACCESS_ONCE(*p);
+}
+
+static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
+{
+ struct posix_acl **p = acl_by_type(inode, type);
+ struct posix_acl *acl = ACCESS_ONCE(*p);
if (acl) {
spin_lock(&inode->i_lock);
acl = *p;
@@ -108,41 +114,20 @@ static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
return acl;
}
-static inline int negative_cached_acl(struct inode *inode, int type)
+static inline struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type)
{
- struct posix_acl **p, *acl;
- switch (type) {
- case ACL_TYPE_ACCESS:
- p = &inode->i_acl;
- break;
- case ACL_TYPE_DEFAULT:
- p = &inode->i_default_acl;
- break;
- default:
- BUG();
- }
- acl = ACCESS_ONCE(*p);
- if (acl)
- return 0;
- return 1;
+ return rcu_dereference(*acl_by_type(inode, type));
}
static inline void set_cached_acl(struct inode *inode,
int type,
struct posix_acl *acl)
{
- struct posix_acl *old = NULL;
+ struct posix_acl **p = acl_by_type(inode, type);
+ struct posix_acl *old;
spin_lock(&inode->i_lock);
- switch (type) {
- case ACL_TYPE_ACCESS:
- old = inode->i_acl;
- inode->i_acl = posix_acl_dup(acl);
- break;
- case ACL_TYPE_DEFAULT:
- old = inode->i_default_acl;
- inode->i_default_acl = posix_acl_dup(acl);
- break;
- }
+ old = *p;
+ rcu_assign_pointer(*p, posix_acl_dup(acl));
spin_unlock(&inode->i_lock);
if (old != ACL_NOT_CACHED)
posix_acl_release(old);
@@ -150,18 +135,11 @@ static inline void set_cached_acl(struct inode *inode,
static inline void forget_cached_acl(struct inode *inode, int type)
{
- struct posix_acl *old = NULL;
+ struct posix_acl **p = acl_by_type(inode, type);
+ struct posix_acl *old;
spin_lock(&inode->i_lock);
- switch (type) {
- case ACL_TYPE_ACCESS:
- old = inode->i_acl;
- inode->i_acl = ACL_NOT_CACHED;
- break;
- case ACL_TYPE_DEFAULT:
- old = inode->i_default_acl;
- inode->i_default_acl = ACL_NOT_CACHED;
- break;
- }
+ old = *p;
+ *p = ACL_NOT_CACHED;
spin_unlock(&inode->i_lock);
if (old != ACL_NOT_CACHED)
posix_acl_release(old);
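A sketch of a lockless reader enabled by the RCU-protected cache (the helper function is
hypothetical; it falls back with -EAGAIN when the ACL is not cached):

#include <linux/fs.h>
#include <linux/posix_acl.h>
#include <linux/rcupdate.h>

static int example_acl_may_read(struct inode *inode)
{
	struct posix_acl *acl;
	int error = -EAGAIN;		/* caller should retry the slow path */

	rcu_read_lock();
	acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
	if (acl && acl != ACL_NOT_CACHED)
		error = posix_acl_permission(inode, acl, MAY_READ);
	rcu_read_unlock();

	return error;
}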
diff --git a/include/linux/power/bq20z75.h b/include/linux/power/bq20z75.h
index b0843b68af9..1398eb004e8 100644
--- a/include/linux/power/bq20z75.h
+++ b/include/linux/power/bq20z75.h
@@ -29,11 +29,14 @@
* @battery_detect: GPIO which is used to detect battery presence
* @battery_detect_present: gpio state when battery is present (0 / 1)
* @i2c_retry_count: # of times to retry on i2c IO failure
+ * @poll_retry_count: # of times to retry looking for new status after
+ * external change notification
*/
struct bq20z75_platform_data {
int battery_detect;
int battery_detect_present;
int i2c_retry_count;
+ int poll_retry_count;
};
#endif
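poll_retry_count bounds how many times the driver re-reads status after an external change notification. An illustrative bounded-retry loop; read_status() and the loop body are hypothetical placeholders, not the driver's actual code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical status reader: returns true once fresh status is available. */
static bool read_status(int attempt)
{
        return attempt >= 2;    /* pretend the third read succeeds */
}

/* Bounded re-poll after a change notification, in the spirit of poll_retry_count. */
static bool poll_for_new_status(int poll_retry_count)
{
        for (int i = 0; i <= poll_retry_count; i++) {
                if (read_status(i))
                        return true;
                /* a real driver would sleep between attempts here */
        }
        return false;
}

int main(void)
{
        printf("status found: %d\n", poll_for_new_status(5));
        return 0;
}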
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index 7995deb8bfc..fe99211fb2b 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -23,8 +23,99 @@
#ifndef __MAX17042_BATTERY_H_
#define __MAX17042_BATTERY_H_
+#define MAX17042_STATUS_BattAbsent (1 << 3)
+#define MAX17042_BATTERY_FULL (100)
+#define MAX17042_DEFAULT_SNS_RESISTOR (10000)
+
+enum max17042_register {
+ MAX17042_STATUS = 0x00,
+ MAX17042_VALRT_Th = 0x01,
+ MAX17042_TALRT_Th = 0x02,
+ MAX17042_SALRT_Th = 0x03,
+ MAX17042_AtRate = 0x04,
+ MAX17042_RepCap = 0x05,
+ MAX17042_RepSOC = 0x06,
+ MAX17042_Age = 0x07,
+ MAX17042_TEMP = 0x08,
+ MAX17042_VCELL = 0x09,
+ MAX17042_Current = 0x0A,
+ MAX17042_AvgCurrent = 0x0B,
+ MAX17042_Qresidual = 0x0C,
+ MAX17042_SOC = 0x0D,
+ MAX17042_AvSOC = 0x0E,
+ MAX17042_RemCap = 0x0F,
+ MAX17402_FullCAP = 0x10,
+ MAX17042_TTE = 0x11,
+ MAX17042_V_empty = 0x12,
+
+ MAX17042_RSLOW = 0x14,
+
+ MAX17042_AvgTA = 0x16,
+ MAX17042_Cycles = 0x17,
+ MAX17042_DesignCap = 0x18,
+ MAX17042_AvgVCELL = 0x19,
+ MAX17042_MinMaxTemp = 0x1A,
+ MAX17042_MinMaxVolt = 0x1B,
+ MAX17042_MinMaxCurr = 0x1C,
+ MAX17042_CONFIG = 0x1D,
+ MAX17042_ICHGTerm = 0x1E,
+ MAX17042_AvCap = 0x1F,
+ MAX17042_ManName = 0x20,
+ MAX17042_DevName = 0x21,
+ MAX17042_DevChem = 0x22,
+
+ MAX17042_TempNom = 0x24,
+ MAX17042_TempCold = 0x25,
+ MAX17042_TempHot = 0x26,
+ MAX17042_AIN = 0x27,
+ MAX17042_LearnCFG = 0x28,
+ MAX17042_SHFTCFG = 0x29,
+ MAX17042_RelaxCFG = 0x2A,
+ MAX17042_MiscCFG = 0x2B,
+ MAX17042_TGAIN = 0x2C,
+ MAx17042_TOFF = 0x2D,
+ MAX17042_CGAIN = 0x2E,
+ MAX17042_COFF = 0x2F,
+
+ MAX17042_Q_empty = 0x33,
+ MAX17042_T_empty = 0x34,
+
+ MAX17042_RCOMP0 = 0x38,
+ MAX17042_TempCo = 0x39,
+ MAX17042_Rx = 0x3A,
+ MAX17042_T_empty0 = 0x3B,
+ MAX17042_TaskPeriod = 0x3C,
+ MAX17042_FSTAT = 0x3D,
+
+ MAX17042_SHDNTIMER = 0x3F,
+
+ MAX17042_VFRemCap = 0x4A,
+
+ MAX17042_QH = 0x4D,
+ MAX17042_QL = 0x4E,
+};
+
+/*
+ * Used for setting a register to a desired value
+ * addr : address of the register
+ * data : value to write to the register
+ */
+struct max17042_reg_data {
+ u8 addr;
+ u16 data;
+};
+
struct max17042_platform_data {
+ struct max17042_reg_data *init_data;
+ int num_init_data; /* Number of entries in init_data array */
bool enable_current_sense;
+
+ /*
+ * R_sns in micro-ohms.
+ * defaults to 10000 (if r_sns = 0), which is the value recommended
+ * by the datasheet, although board designers can change it.
+ */
+ unsigned int r_sns;
};
#endif /* __MAX17042_BATTERY_H_ */
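Per the comment above, r_sns falls back to MAX17042_DEFAULT_SNS_RESISTOR when the board leaves it at 0, and init_data is a list of register writes to apply at setup time. A hedged sketch of how a driver might consume this platform data; the register-write helper and the sample values are made up for illustration:

#include <stdio.h>

#define MAX17042_DEFAULT_SNS_RESISTOR 10000

struct max17042_reg_data {
        unsigned char addr;
        unsigned short data;
};

struct max17042_platform_data {
        struct max17042_reg_data *init_data;
        int num_init_data;
        unsigned int r_sns;
};

/* Hypothetical register write; a real driver would go through i2c/regmap. */
static void write_reg(unsigned char addr, unsigned short data)
{
        printf("reg 0x%02x <- 0x%04x\n", addr, data);
}

static void apply_platform_data(const struct max17042_platform_data *pdata)
{
        unsigned int r_sns = pdata->r_sns ? pdata->r_sns
                                          : MAX17042_DEFAULT_SNS_RESISTOR;

        printf("sense resistor: %u micro-ohms\n", r_sns);
        for (int i = 0; i < pdata->num_init_data; i++)
                write_reg(pdata->init_data[i].addr, pdata->init_data[i].data);
}

int main(void)
{
        struct max17042_reg_data init[] = { { 0x1D, 0x2210 } };     /* sample value */
        struct max17042_platform_data pdata = {
                .init_data = init, .num_init_data = 1, .r_sns = 0,
        };

        apply_platform_data(&pdata);
        return 0;
}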
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 2455ef2683f..cc03bbf5c4b 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -38,9 +38,12 @@ struct pstore_info {
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
- struct timespec *time);
- u64 (*write)(enum pstore_type_id type, size_t size);
- int (*erase)(u64 id);
+ struct timespec *time, struct pstore_info *psi);
+ u64 (*write)(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi);
+ int (*erase)(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi);
+ void *data;
};
#ifdef CONFIG_PSTORE
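The new callback signatures pass the struct pstore_info pointer (with its data field) back into read/write/erase, so a backend can reach its private state without globals. A minimal userspace sketch of that context-threading pattern, with made-up names:

#include <stdio.h>

struct backend_info {
        const char *name;
        int (*write)(int kind, unsigned int part, struct backend_info *bi);
        void *data;     /* backend-private state, like pstore_info::data */
};

struct my_state {
        int writes;
};

static int my_write(int kind, unsigned int part, struct backend_info *bi)
{
        struct my_state *st = bi->data; /* recover private state from the callback argument */

        st->writes++;
        printf("%s: write kind=%d part=%u (total %d)\n",
               bi->name, kind, part, st->writes);
        return 0;
}

int main(void)
{
        struct my_state st = { 0 };
        struct backend_info bi = { .name = "demo", .write = my_write, .data = &st };

        bi.write(1, 0, &bi);
        bi.write(1, 1, &bi);
        return 0;
}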
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 23241c2fecc..9d4539c52e5 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -39,7 +39,15 @@
* when it is shrunk, before we rcu free the node. See shrink code for
* details.
*/
-#define RADIX_TREE_INDIRECT_PTR 1
+#define RADIX_TREE_INDIRECT_PTR 1
+/*
+ * A common use of the radix tree is to store pointers to struct pages;
+ * but shmem/tmpfs also needs to store swap entries in the same tree:
+ * those are marked as exceptional entries to distinguish them.
+ * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
+ */
+#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
+#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
#define radix_tree_indirect_to_ptr(ptr) \
radix_tree_indirect_to_ptr((void __force *)(ptr))
@@ -174,6 +182,28 @@ static inline int radix_tree_deref_retry(void *arg)
}
/**
+ * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
+ * @arg: value returned by radix_tree_deref_slot
+ * Returns: 0 if well-aligned pointer, non-0 if exceptional entry.
+ */
+static inline int radix_tree_exceptional_entry(void *arg)
+{
+ /* Not unlikely because radix_tree_exception often tested first */
+ return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
+}
+
+/**
+ * radix_tree_exception - radix_tree_deref_slot returned either exception?
+ * @arg: value returned by radix_tree_deref_slot
+ * Returns: 0 if well-aligned pointer, non-0 if either kind of exception.
+ */
+static inline int radix_tree_exception(void *arg)
+{
+ return unlikely((unsigned long)arg &
+ (RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
+}
+
+/**
* radix_tree_replace_slot - replace item in a slot
* @pslot: pointer to slot, returned by radix_tree_lookup_slot
* @item: new item to store in the slot.
@@ -194,8 +224,8 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items);
-unsigned int
-radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
+ void ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
@@ -222,6 +252,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
unsigned long nr_to_tag,
unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
+unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
static inline void radix_tree_preload_end(void)
{
diff --git a/include/linux/random.h b/include/linux/random.h
index ce29a040c8d..d13059f3ea3 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -57,18 +57,6 @@ extern void add_interrupt_randomness(int irq);
extern void get_random_bytes(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
-extern __u32 secure_ip_id(__be32 daddr);
-extern __u32 secure_ipv6_id(const __be32 daddr[4]);
-extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
-extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
- __be16 dport);
-extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport);
-extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
- __be16 sport, __be16 dport);
-extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport);
-
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
#endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 9e87c1cb727..26f6ea4444e 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -122,6 +122,9 @@ struct regulator;
struct regulator_bulk_data {
const char *supply;
struct regulator *consumer;
+
+ /* Internal use */
+ int ret;
};
#if defined(CONFIG_REGULATOR)
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 6c433b89c80..1a80bc77517 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -188,18 +188,16 @@ struct regulator_dev {
/* lists we belong to */
struct list_head list; /* list of all regulators */
- struct list_head slist; /* list of supplied regulators */
/* lists we own */
struct list_head consumer_list; /* consumers we supply */
- struct list_head supply_list; /* regulators we supply */
struct blocking_notifier_head notifier;
struct mutex mutex; /* consumer lock */
struct module *owner;
struct device dev;
struct regulation_constraints *constraints;
- struct regulator_dev *supply; /* for tree */
+ struct regulator *supply; /* for tree */
void *reg_data; /* regulator_dev data */
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index a2afc9fbe18..8bffe9ae2ca 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -8,6 +8,8 @@
* Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts)
*/
+#define SCIx_NOT_SUPPORTED (-1)
+
enum {
SCBRR_ALGO_1, /* ((clk + 16 * bps) / (16 * bps) - 1) */
SCBRR_ALGO_2, /* ((clk + 16 * bps) / (32 * bps) - 1) */
@@ -25,6 +27,28 @@ enum {
#define SCSCR_CKE1 (1 << 1)
#define SCSCR_CKE0 (1 << 0)
+/* SCxSR SCI */
+#define SCI_TDRE 0x80
+#define SCI_RDRF 0x40
+#define SCI_ORER 0x20
+#define SCI_FER 0x10
+#define SCI_PER 0x08
+#define SCI_TEND 0x04
+
+#define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER)
+
+/* SCxSR SCIF */
+#define SCIF_ER 0x0080
+#define SCIF_TEND 0x0040
+#define SCIF_TDFE 0x0020
+#define SCIF_BRK 0x0010
+#define SCIF_FER 0x0008
+#define SCIF_PER 0x0004
+#define SCIF_RDF 0x0002
+#define SCIF_DR 0x0001
+
+#define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
+
/* Offsets into the sci_port->irqs array */
enum {
SCIx_ERI_IRQ,
@@ -32,6 +56,24 @@ enum {
SCIx_TXI_IRQ,
SCIx_BRI_IRQ,
SCIx_NR_IRQS,
+
+ SCIx_MUX_IRQ = SCIx_NR_IRQS, /* special case */
+};
+
+enum {
+ SCIx_PROBE_REGTYPE,
+
+ SCIx_SCI_REGTYPE,
+ SCIx_IRDA_REGTYPE,
+ SCIx_SCIFA_REGTYPE,
+ SCIx_SCIFB_REGTYPE,
+ SCIx_SH3_SCIF_REGTYPE,
+ SCIx_SH4_SCIF_REGTYPE,
+ SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+ SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+ SCIx_SH7705_SCIF_REGTYPE,
+
+ SCIx_NR_REGTYPES,
};
#define SCIx_IRQ_MUXED(irq) \
@@ -42,8 +84,29 @@ enum {
[SCIx_BRI_IRQ] = (irq), \
}
+#define SCIx_IRQ_IS_MUXED(port) \
+ ((port)->cfg->irqs[SCIx_ERI_IRQ] == \
+ (port)->cfg->irqs[SCIx_RXI_IRQ]) || \
+ ((port)->cfg->irqs[SCIx_ERI_IRQ] && \
+ !(port)->cfg->irqs[SCIx_RXI_IRQ])
+/*
+ * SCI register subset common for all port types.
+ * Not all registers will exist on all parts.
+ */
+enum {
+ SCSMR, SCBRR, SCSCR, SCxSR,
+ SCFCR, SCFDR, SCxTDR, SCxRDR,
+ SCLSR, SCTFDR, SCRFDR, SCSPTR,
+
+ SCIx_NR_REGS,
+};
+
struct device;
+struct plat_sci_port_ops {
+ void (*init_pins)(struct uart_port *, unsigned int cflag);
+};
+
/*
* Platform device specific platform_data struct
*/
@@ -56,6 +119,18 @@ struct plat_sci_port {
unsigned int scbrr_algo_id; /* SCBRR calculation algo */
unsigned int scscr; /* SCSCR initialization */
+ /*
+ * Platform overrides if necessary, defaults otherwise.
+ */
+ int overrun_bit;
+ unsigned int error_mask;
+
+ int port_reg;
+ unsigned char regshift;
+ unsigned char regtype;
+
+ struct plat_sci_port_ops *ops;
+
struct device *dma_dev;
unsigned int dma_slave_tx;
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
index 9a52f72527d..3ccf18648d0 100644
--- a/include/linux/sh_clk.h
+++ b/include/linux/sh_clk.h
@@ -147,4 +147,8 @@ int sh_clk_div4_reparent_register(struct clk *clks, int nr,
int sh_clk_div6_register(struct clk *clks, int nr);
int sh_clk_div6_reparent_register(struct clk *clks, int nr);
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
+#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
+
#endif /* __SH_CLOCK_H */
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h
index b08cd4efa15..cb2dd118cc0 100644
--- a/include/linux/sh_dma.h
+++ b/include/linux/sh_dma.h
@@ -62,6 +62,12 @@ struct sh_dmae_pdata {
const unsigned int *ts_shift;
int ts_shift_num;
u16 dmaor_init;
+ unsigned int chcr_offset;
+ u32 chcr_ie_bit;
+
+ unsigned int dmaor_is_32bit:1;
+ unsigned int needs_tend_set:1;
+ unsigned int no_dmars:1;
};
/* DMA register */
@@ -71,6 +77,8 @@ struct sh_dmae_pdata {
#define CHCR 0x0C
#define DMAOR 0x40
+#define TEND 0x18 /* USB-DMAC */
+
/* DMAOR definitions */
#define DMAOR_AE 0x00000004
#define DMAOR_NMIF 0x00000002
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index aa08fa8fd79..9291ac3cc62 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -8,22 +8,15 @@
/* inode in-kernel data */
-#define SHMEM_NR_DIRECT 16
-
-#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
-
struct shmem_inode_info {
spinlock_t lock;
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
- unsigned long swapped; /* subtotal assigned to swap */
- unsigned long next_index; /* highest alloced index + 1 */
- struct shared_policy policy; /* NUMA memory alloc policy */
- struct page *i_indirect; /* top indirect blocks page */
union {
- swp_entry_t i_direct[SHMEM_NR_DIRECT]; /* first blocks */
- char inline_symlink[SHMEM_SYMLINK_INLINE_LEN];
+ unsigned long swapped; /* subtotal assigned to swap */
+ char *symlink; /* unswappable short symlink */
};
+ struct shared_policy policy; /* NUMA memory alloc policy */
struct list_head swaplist; /* chain of maybes on swap */
struct list_head xattr_list; /* list of shmem_xattr */
struct inode vfs_inode;
@@ -49,7 +42,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
/*
* Functions in mm/shmem.c called directly from elsewhere:
*/
-extern int init_tmpfs(void);
+extern int shmem_init(void);
extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
@@ -59,8 +52,6 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
extern int shmem_unuse(swp_entry_t entry, struct page *page);
-extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
- struct page **pagep, swp_entry_t *ent);
static inline struct page *shmem_read_mapping_page(
struct address_space *mapping, pgoff_t index)
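The slimmed-down shmem_inode_info overlays swapped and symlink in a union: an inode is either a regular file tracking swapped pages or a short symlink, never both, so the two fields can share storage. The saving is easy to see in isolation:

#include <stdio.h>

struct separate {
        unsigned long swapped;
        char *symlink;
};

struct overlaid {
        union {
                unsigned long swapped;  /* regular file: pages on swap */
                char *symlink;          /* symlink: unswappable target */
        };
};

int main(void)
{
        printf("separate: %zu bytes, overlaid: %zu bytes\n",
               sizeof(struct separate), sizeof(struct overlaid));
        return 0;
}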
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index cd42e30b7c6..2189d3ffc85 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -1,3 +1,8 @@
+#ifndef _LINUX_SWAPOPS_H
+#define _LINUX_SWAPOPS_H
+
+#include <linux/radix-tree.h>
+
/*
* swapcache pages are stored in the swapper_space radix tree. We want to
* get good packing density in that tree, so the index should be dense in
@@ -76,6 +81,22 @@ static inline pte_t swp_entry_to_pte(swp_entry_t entry)
return __swp_entry_to_pte(arch_entry);
}
+static inline swp_entry_t radix_to_swp_entry(void *arg)
+{
+ swp_entry_t entry;
+
+ entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+ return entry;
+}
+
+static inline void *swp_to_radix_entry(swp_entry_t entry)
+{
+ unsigned long value;
+
+ value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
+ return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
+}
+
#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
@@ -169,3 +190,5 @@ static inline int non_swap_entry(swp_entry_t entry)
return 0;
}
#endif
+
+#endif /* _LINUX_SWAPOPS_H */
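radix_to_swp_entry() and swp_to_radix_entry() pack a swap entry into a radix-tree slot by shifting the value past the exceptional bits and tagging it with RADIX_TREE_EXCEPTIONAL_ENTRY, so lookups can tell swap entries from page pointers. A standalone round-trip check of that encoding:

#include <assert.h>
#include <stdio.h>

#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
#define RADIX_TREE_EXCEPTIONAL_SHIFT 2

static void *encode_swap(unsigned long val)
{
        return (void *)((val << RADIX_TREE_EXCEPTIONAL_SHIFT) |
                        RADIX_TREE_EXCEPTIONAL_ENTRY);
}

static unsigned long decode_swap(void *entry)
{
        return (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
}

static int is_exceptional(void *entry)
{
        return (unsigned long)entry & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

int main(void)
{
        unsigned long val = 0x1234;
        void *entry = encode_swap(val);

        assert(is_exceptional(entry));          /* tagged, not a page pointer */
        assert(decode_swap(entry) == val);      /* value survives the round trip */
        printf("entry %p decodes back to 0x%lx\n", entry, decode_swap(entry));
        return 0;
}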
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index d3ec89fb412..47b4a27e6e9 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -85,22 +85,6 @@ struct thermal_cooling_device {
((long)t-2732+5)/10 : ((long)t-2732-5)/10)
#define CELSIUS_TO_KELVIN(t) ((t)*10+2732)
-#if defined(CONFIG_THERMAL_HWMON)
-/* thermal zone devices with the same type share one hwmon device */
-struct thermal_hwmon_device {
- char type[THERMAL_NAME_LENGTH];
- struct device *device;
- int count;
- struct list_head tz_list;
- struct list_head node;
-};
-
-struct thermal_hwmon_attr {
- struct device_attribute attr;
- char name[16];
-};
-#endif
-
struct thermal_zone_device {
int id;
char type[THERMAL_NAME_LENGTH];
@@ -120,12 +104,6 @@ struct thermal_zone_device {
struct mutex lock; /* protect cooling devices list */
struct list_head node;
struct delayed_work poll_queue;
-#if defined(CONFIG_THERMAL_HWMON)
- struct list_head hwmon_node;
- struct thermal_hwmon_device *hwmon;
- struct thermal_hwmon_attr temp_input; /* hwmon sys attr */
- struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */
-#endif
};
/* Adding event notification support elements */
#define THERMAL_GENL_FAMILY_NAME "thermal_event"
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 3b938743514..9808877c2ab 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -8,7 +8,7 @@
* have chosen to adopt the protocol and over the years it has become a
* de-facto standard for labeled networking.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/include/net/dst.h b/include/net/dst.h
index 29e255796ce..13d507d69dd 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -37,7 +37,7 @@ struct dst_entry {
unsigned long _metrics;
unsigned long expires;
struct dst_entry *path;
- struct neighbour *_neighbour;
+ struct neighbour __rcu *_neighbour;
#ifdef CONFIG_XFRM
struct xfrm_state *xfrm;
#else
@@ -88,12 +88,17 @@ struct dst_entry {
static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
{
- return dst->_neighbour;
+ return rcu_dereference(dst->_neighbour);
+}
+
+static inline struct neighbour *dst_get_neighbour_raw(struct dst_entry *dst)
+{
+ return rcu_dereference_raw(dst->_neighbour);
}
static inline void dst_set_neighbour(struct dst_entry *dst, struct neighbour *neigh)
{
- dst->_neighbour = neigh;
+ rcu_assign_pointer(dst->_neighbour, neigh);
}
extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
@@ -382,8 +387,12 @@ static inline void dst_rcu_free(struct rcu_head *head)
static inline void dst_confirm(struct dst_entry *dst)
{
if (dst) {
- struct neighbour *n = dst_get_neighbour(dst);
+ struct neighbour *n;
+
+ rcu_read_lock();
+ n = dst_get_neighbour(dst);
neigh_confirm(n);
+ rcu_read_unlock();
}
}
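With _neighbour now annotated __rcu, writers publish through rcu_assign_pointer() and readers fetch it under rcu_read_lock() via rcu_dereference(). As a loose userspace analogue of just the publish/read ordering (RCU itself is not available outside the kernel), C11 release/acquire atomics can stand in; this is a sketch of the memory-ordering idea only, not the kernel API:

#include <stdatomic.h>
#include <stdio.h>

struct neighbour_like {
        int confirmed;
};

static _Atomic(struct neighbour_like *) neigh_ptr;

/* Writer: initialise the object, then publish it (like rcu_assign_pointer). */
static void publish(struct neighbour_like *n)
{
        n->confirmed = 0;
        atomic_store_explicit(&neigh_ptr, n, memory_order_release);
}

/* Reader: load the published pointer (like rcu_dereference under rcu_read_lock). */
static void confirm(void)
{
        struct neighbour_like *n =
                atomic_load_explicit(&neigh_ptr, memory_order_acquire);

        if (n)
                n->confirmed = 1;
}

int main(void)
{
        struct neighbour_like n;

        publish(&n);
        confirm();
        printf("confirmed = %d\n", n.confirmed);
        return 0;
}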
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index f21a16ee370..f67440970d7 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -4,7 +4,7 @@
* The NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
new file mode 100644
index 00000000000..d97f6892c01
--- /dev/null
+++ b/include/net/secure_seq.h
@@ -0,0 +1,20 @@
+#ifndef _NET_SECURE_SEQ
+#define _NET_SECURE_SEQ
+
+#include <linux/types.h>
+
+extern __u32 secure_ip_id(__be32 daddr);
+extern __u32 secure_ipv6_id(const __be32 daddr[4]);
+extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport);
+extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport);
+extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport);
+
+#endif /* _NET_SECURE_SEQ */
diff --git a/include/pcmcia/device_id.h b/include/pcmcia/device_id.h
index 63e5b8f6b7d..00dbfac9c6e 100644
--- a/include/pcmcia/device_id.h
+++ b/include/pcmcia/device_id.h
@@ -95,6 +95,15 @@
.prod_id = { (v1), NULL, NULL, NULL }, \
.prod_id_hash = { (vh1), 0, 0, 0 }, }
+#define PCMCIA_DEVICE_MANF_CARD_PROD_ID3(manf, card, v3, vh3) { \
+ .match_flags = PCMCIA_DEV_ID_MATCH_MANF_ID| \
+ PCMCIA_DEV_ID_MATCH_CARD_ID| \
+ PCMCIA_DEV_ID_MATCH_PROD_ID3, \
+ .manf_id = (manf), \
+ .card_id = (card), \
+ .prod_id = { NULL, NULL, (v3), NULL }, \
+ .prod_id_hash = { 0, 0, (vh3), 0 }, }
+
/* multi-function devices */
diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
new file mode 100644
index 00000000000..c5c5e008e6d
--- /dev/null
+++ b/include/scsi/osd_ore.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2011
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * Public Declarations of the ORE API
+ *
+ * This file is part of the ORE (Object Raid Engine) library.
+ *
+ * ORE is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation. (GPL v2)
+ *
+ * ORE is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the ORE; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __ORE_H__
+#define __ORE_H__
+
+#include <scsi/osd_initiator.h>
+#include <scsi/osd_attributes.h>
+#include <scsi/osd_sec.h>
+#include <linux/pnfs_osd_xdr.h>
+
+struct ore_comp {
+ struct osd_obj_id obj;
+ u8 cred[OSD_CAP_LEN];
+};
+
+struct ore_layout {
+ /* Our way of looking at the data_map */
+ unsigned stripe_unit;
+ unsigned mirrors_p1;
+
+ unsigned group_width;
+ u64 group_depth;
+ unsigned group_count;
+};
+
+struct ore_components {
+ unsigned numdevs; /* Num of devices in array */
+ /* If @single_comp == EC_SINGLE_COMP, @comps points to a single
+ * component. Otherwise there are @numdevs components.
+ */
+ enum EC_COMP_USAGE {
+ EC_SINGLE_COMP = 0, EC_MULTPLE_COMPS = 0xffffffff
+ } single_comp;
+ struct ore_comp *comps;
+ struct osd_dev **ods; /* osd_dev array */
+};
+
+struct ore_io_state;
+typedef void (*ore_io_done_fn)(struct ore_io_state *ios, void *private);
+
+struct ore_io_state {
+ struct kref kref;
+
+ void *private;
+ ore_io_done_fn done;
+
+ struct ore_layout *layout;
+ struct ore_components *comps;
+
+ /* Global read/write IO*/
+ loff_t offset;
+ unsigned long length;
+ void *kern_buff;
+
+ struct page **pages;
+ unsigned nr_pages;
+ unsigned pgbase;
+ unsigned pages_consumed;
+
+ /* Attributes */
+ unsigned in_attr_len;
+ struct osd_attr *in_attr;
+ unsigned out_attr_len;
+ struct osd_attr *out_attr;
+
+ bool reading;
+
+ /* Variable array of size numdevs */
+ unsigned numdevs;
+ struct ore_per_dev_state {
+ struct osd_request *or;
+ struct bio *bio;
+ loff_t offset;
+ unsigned length;
+ unsigned dev;
+ } per_dev[];
+};
+
+static inline unsigned ore_io_state_size(unsigned numdevs)
+{
+ return sizeof(struct ore_io_state) +
+ sizeof(struct ore_per_dev_state) * numdevs;
+}
+
+/* ore.c */
+int ore_get_rw_state(struct ore_layout *layout, struct ore_components *comps,
+ bool is_reading, u64 offset, u64 length,
+ struct ore_io_state **ios);
+int ore_get_io_state(struct ore_layout *layout, struct ore_components *comps,
+ struct ore_io_state **ios);
+void ore_put_io_state(struct ore_io_state *ios);
+
+int ore_check_io(struct ore_io_state *ios, u64 *resid);
+
+int ore_create(struct ore_io_state *ios);
+int ore_remove(struct ore_io_state *ios);
+int ore_write(struct ore_io_state *ios);
+int ore_read(struct ore_io_state *ios);
+int ore_truncate(struct ore_layout *layout, struct ore_components *comps,
+ u64 size);
+
+int extract_attr_from_ios(struct ore_io_state *ios, struct osd_attr *attr);
+
+extern const struct osd_attr g_attr_logical_length;
+
+#endif
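ore_io_state ends in a flexible array of per-device state, and ore_io_state_size() is the usual sizeof(header) + numdevs * sizeof(element) computation used to size a single allocation. A standalone illustration of the pattern (types here are simplified stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct per_dev {
        unsigned dev;
        unsigned length;
};

struct io_state {
        unsigned numdevs;
        struct per_dev per_dev[];       /* flexible array member */
};

static size_t io_state_size(unsigned numdevs)
{
        return sizeof(struct io_state) + sizeof(struct per_dev) * numdevs;
}

int main(void)
{
        unsigned numdevs = 4;
        struct io_state *ios = calloc(1, io_state_size(numdevs));

        if (!ios)
                return 1;
        ios->numdevs = numdevs;
        for (unsigned i = 0; i < numdevs; i++)
                ios->per_dev[i].dev = i;
        printf("allocated %zu bytes for %u devices\n",
               io_state_size(numdevs), ios->numdevs);
        free(ios);
        return 0;
}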
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 6363193a341..b50a5473624 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -23,7 +23,7 @@ TRACE_EVENT(ext4_free_inode,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( umode_t, mode )
+ __field( __u16, mode )
__field( uid_t, uid )
__field( gid_t, gid )
__field( __u64, blocks )
@@ -52,7 +52,7 @@ TRACE_EVENT(ext4_request_inode,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, dir )
- __field( umode_t, mode )
+ __field( __u16, mode )
),
TP_fast_assign(
@@ -75,7 +75,7 @@ TRACE_EVENT(ext4_allocate_inode,
__field( dev_t, dev )
__field( ino_t, ino )
__field( ino_t, dir )
- __field( umode_t, mode )
+ __field( __u16, mode )
),
TP_fast_assign(
@@ -725,7 +725,7 @@ TRACE_EVENT(ext4_free_blocks,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( umode_t, mode )
+ __field( __u16, mode )
__field( __u64, block )
__field( unsigned long, count )
__field( int, flags )
@@ -1012,7 +1012,7 @@ TRACE_EVENT(ext4_forget,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( umode_t, mode )
+ __field( __u16, mode )
__field( int, is_metadata )
__field( __u64, block )
),
@@ -1039,7 +1039,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( umode_t, mode )
+ __field( __u16, mode )
__field( __u64, i_blocks )
__field( int, used_blocks )
__field( int, reserved_data_blocks )
@@ -1076,7 +1076,7 @@ TRACE_EVENT(ext4_da_reserve_space,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( umode_t, mode )
+ __field( __u16, mode )
__field( __u64, i_blocks )
__field( int, md_needed )
__field( int, reserved_data_blocks )
@@ -1110,7 +1110,7 @@ TRACE_EVENT(ext4_da_release_space,
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( umode_t, mode )
+ __field( __u16, mode )
__field( __u64, i_blocks )
__field( int, freed_blocks )
__field( int, reserved_data_blocks )
@@ -1518,6 +1518,77 @@ TRACE_EVENT(ext4_load_inode,
(unsigned long) __entry->ino)
);
+TRACE_EVENT(ext4_journal_start,
+ TP_PROTO(struct super_block *sb, int nblocks, unsigned long IP),
+
+ TP_ARGS(sb, nblocks, IP),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( int, nblocks )
+ __field(unsigned long, ip )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->nblocks = nblocks;
+ __entry->ip = IP;
+ ),
+
+ TP_printk("dev %d,%d nblocks %d caller %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->nblocks, (void *)__entry->ip)
+);
+
+DECLARE_EVENT_CLASS(ext4__trim,
+ TP_PROTO(struct super_block *sb,
+ ext4_group_t group,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len),
+
+ TP_ARGS(sb, group, start, len),
+
+ TP_STRUCT__entry(
+ __field( int, dev_major )
+ __field( int, dev_minor )
+ __field( __u32, group )
+ __field( int, start )
+ __field( int, len )
+ ),
+
+ TP_fast_assign(
+ __entry->dev_major = MAJOR(sb->s_dev);
+ __entry->dev_minor = MINOR(sb->s_dev);
+ __entry->group = group;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("dev %d,%d group %u, start %d, len %d",
+ __entry->dev_major, __entry->dev_minor,
+ __entry->group, __entry->start, __entry->len)
+);
+
+DEFINE_EVENT(ext4__trim, ext4_trim_extent,
+
+ TP_PROTO(struct super_block *sb,
+ ext4_group_t group,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len),
+
+ TP_ARGS(sb, group, start, len)
+);
+
+DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
+
+ TP_PROTO(struct super_block *sb,
+ ext4_group_t group,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len),
+
+ TP_ARGS(sb, group, start, len)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index bf16545cc97..75964412ddb 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -26,8 +26,8 @@ TRACE_EVENT(jbd2_checkpoint,
__entry->result = result;
),
- TP_printk("dev %s result %d",
- jbd2_dev_to_name(__entry->dev), __entry->result)
+ TP_printk("dev %d,%d result %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->result)
);
DECLARE_EVENT_CLASS(jbd2_commit,
@@ -48,9 +48,9 @@ DECLARE_EVENT_CLASS(jbd2_commit,
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %s transaction %d sync %d",
- jbd2_dev_to_name(__entry->dev), __entry->transaction,
- __entry->sync_commit)
+ TP_printk("dev %d,%d transaction %d sync %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->transaction, __entry->sync_commit)
);
DEFINE_EVENT(jbd2_commit, jbd2_start_commit,
@@ -100,9 +100,9 @@ TRACE_EVENT(jbd2_end_commit,
__entry->head = journal->j_tail_sequence;
),
- TP_printk("dev %s transaction %d sync %d head %d",
- jbd2_dev_to_name(__entry->dev), __entry->transaction,
- __entry->sync_commit, __entry->head)
+ TP_printk("dev %d,%d transaction %d sync %d head %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->transaction, __entry->sync_commit, __entry->head)
);
TRACE_EVENT(jbd2_submit_inode_data,
@@ -120,8 +120,9 @@ TRACE_EVENT(jbd2_submit_inode_data,
__entry->ino = inode->i_ino;
),
- TP_printk("dev %s ino %lu",
- jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
+ TP_printk("dev %d,%d ino %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino)
);
TRACE_EVENT(jbd2_run_stats,
@@ -156,9 +157,9 @@ TRACE_EVENT(jbd2_run_stats,
__entry->blocks_logged = stats->rs_blocks_logged;
),
- TP_printk("dev %s tid %lu wait %u running %u locked %u flushing %u "
+ TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u "
"logging %u handle_count %u blocks %u blocks_logged %u",
- jbd2_dev_to_name(__entry->dev), __entry->tid,
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
jiffies_to_msecs(__entry->wait),
jiffies_to_msecs(__entry->running),
jiffies_to_msecs(__entry->locked),
@@ -192,9 +193,9 @@ TRACE_EVENT(jbd2_checkpoint_stats,
__entry->dropped = stats->cs_dropped;
),
- TP_printk("dev %s tid %lu chp_time %u forced_to_close %u "
+ TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
"written %u dropped %u",
- jbd2_dev_to_name(__entry->dev), __entry->tid,
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
jiffies_to_msecs(__entry->chp_time),
__entry->forced_to_close, __entry->written, __entry->dropped)
);
@@ -222,9 +223,10 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,
__entry->freed = freed;
),
- TP_printk("dev %s from %u to %u offset %lu freed %lu",
- jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
- __entry->first_tid, __entry->block_nr, __entry->freed)
+ TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->tail_sequence, __entry->first_tid,
+ __entry->block_nr, __entry->freed)
);
#endif /* _TRACE_JBD2_H */
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 892b97f8e15..3b55ef22f8d 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -21,8 +21,6 @@
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <asm/atomic.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
@@ -136,12 +134,6 @@ enum omap_display_caps {
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1,
};
-enum omap_dss_update_mode {
- OMAP_DSS_UPDATE_DISABLED = 0,
- OMAP_DSS_UPDATE_AUTO,
- OMAP_DSS_UPDATE_MANUAL,
-};
-
enum omap_dss_display_state {
OMAP_DSS_DISPLAY_DISABLED = 0,
OMAP_DSS_DISPLAY_ACTIVE,
@@ -246,7 +238,7 @@ int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel);
/* Board specific data */
struct omap_dss_board_info {
- int (*get_last_off_on_transaction_id)(struct device *dev);
+ int (*get_context_loss_count)(struct device *dev);
int num_devices;
struct omap_dss_device **devices;
struct omap_dss_device *default_device;
@@ -266,8 +258,6 @@ static inline int omap_display_init(struct omap_dss_board_info *board_data)
struct omap_display_platform_data {
struct omap_dss_board_info *board_data;
/* TODO: Additional members to be added when PM is considered */
-
- bool (*opt_clock_available)(const char *clk_role);
};
struct omap_video_timings {
@@ -300,6 +290,12 @@ extern const struct omap_video_timings omap_dss_pal_timings;
extern const struct omap_video_timings omap_dss_ntsc_timings;
#endif
+struct omap_dss_cpr_coefs {
+ s16 rr, rg, rb;
+ s16 gr, gg, gb;
+ s16 br, bg, bb;
+};
+
struct omap_overlay_info {
bool enabled;
@@ -359,6 +355,9 @@ struct omap_overlay_manager_info {
bool trans_enabled;
bool alpha_enabled;
+
+ bool cpr_enable;
+ struct omap_dss_cpr_coefs cpr_coefs;
};
struct omap_overlay_manager {
@@ -526,11 +525,6 @@ struct omap_dss_driver {
int (*resume)(struct omap_dss_device *display);
int (*run_test)(struct omap_dss_device *display, int test);
- int (*set_update_mode)(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode);
- enum omap_dss_update_mode (*get_update_mode)(
- struct omap_dss_device *dssdev);
-
int (*update)(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h);
int (*sync)(struct omap_dss_device *dssdev);
diff --git a/init/main.c b/init/main.c
index d7211faed2a..9c51ee7adf3 100644
--- a/init/main.c
+++ b/init/main.c
@@ -369,9 +369,12 @@ static noinline void __init_refok rest_init(void)
init_idle_bootup_task(current);
preempt_enable_no_resched();
schedule();
- preempt_disable();
+
+ /* At this point, we can enable user mode helper functionality */
+ usermodehelper_enable();
/* Call into cpu_idle with preempt disabled */
+ preempt_disable();
cpu_idle();
}
@@ -715,7 +718,7 @@ static void __init do_basic_setup(void)
{
cpuset_init_smp();
usermodehelper_init();
- init_tmpfs();
+ shmem_init();
driver_init();
init_irq_proc();
do_ctors();
diff --git a/ipc/shm.c b/ipc/shm.c
index 9fb044f3b34..02ecf2c078f 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -105,9 +105,16 @@ void shm_exit_ns(struct ipc_namespace *ns)
}
#endif
-void __init shm_init (void)
+static int __init ipc_ns_init(void)
{
shm_init_ns(&init_ipc_ns);
+ return 0;
+}
+
+pure_initcall(ipc_ns_init);
+
+void __init shm_init (void)
+{
ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
@@ -294,7 +301,7 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
down_write(&shm_ids(ns).rw_mutex);
- if (&shm_ids(ns).in_use)
+ if (shm_ids(ns).in_use)
idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
up_write(&shm_ids(ns).rw_mutex);
}
@@ -304,9 +311,12 @@ void exit_shm(struct task_struct *task)
{
struct ipc_namespace *ns = task->nsproxy->ipc_ns;
+ if (shm_ids(ns).in_use == 0)
+ return;
+
/* Destroy all already created segments, but not mapped yet */
down_write(&shm_ids(ns).rw_mutex);
- if (&shm_ids(ns).in_use)
+ if (shm_ids(ns).in_use)
idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
up_write(&shm_ids(ns).rw_mutex);
}
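The shm fix above replaces the test on &shm_ids(ns).in_use with a test on the value itself: the address of a struct member is never NULL, so the old condition was always true and never skipped the idr walk. A short userspace demonstration of the pitfall:

#include <stdio.h>

struct ids {
        int in_use;
};

int main(void)
{
        struct ids ids = { .in_use = 0 };

        /* The address of a member is never NULL, so this branch always runs. */
        if (&ids.in_use)
                printf("address test: always true\n");

        /* Testing the value itself is what the fix does. */
        if (ids.in_use)
                printf("value test: only when in_use != 0\n");
        else
                printf("value test: in_use is zero, walk skipped\n");
        return 0;
}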
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c
index a11db956dd6..34872482315 100644
--- a/kernel/debug/gdbstub.c
+++ b/kernel/debug/gdbstub.c
@@ -42,6 +42,8 @@
/* Our I/O buffers. */
static char remcom_in_buffer[BUFMAX];
static char remcom_out_buffer[BUFMAX];
+static int gdbstub_use_prev_in_buf;
+static int gdbstub_prev_in_buf_pos;
/* Storage for the registers, in GDB format. */
static unsigned long gdb_regs[(NUMREGBYTES +
@@ -58,6 +60,13 @@ static int gdbstub_read_wait(void)
int ret = -1;
int i;
+ if (unlikely(gdbstub_use_prev_in_buf)) {
+ if (gdbstub_prev_in_buf_pos < gdbstub_use_prev_in_buf)
+ return remcom_in_buffer[gdbstub_prev_in_buf_pos++];
+ else
+ gdbstub_use_prev_in_buf = 0;
+ }
+
/* poll any additional I/O interfaces that are defined */
while (ret < 0)
for (i = 0; kdb_poll_funcs[i] != NULL; i++) {
@@ -109,7 +118,6 @@ static void get_packet(char *buffer)
buffer[count] = ch;
count = count + 1;
}
- buffer[count] = 0;
if (ch == '#') {
xmitcsum = hex_to_bin(gdbstub_read_wait()) << 4;
@@ -124,6 +132,7 @@ static void get_packet(char *buffer)
if (dbg_io_ops->flush)
dbg_io_ops->flush();
}
+ buffer[count] = 0;
} while (checksum != xmitcsum);
}
@@ -1082,12 +1091,11 @@ int gdbstub_state(struct kgdb_state *ks, char *cmd)
case 'c':
strcpy(remcom_in_buffer, cmd);
return 0;
- case '?':
- gdb_cmd_status(ks);
- break;
- case '\0':
- strcpy(remcom_out_buffer, "");
- break;
+ case '$':
+ strcpy(remcom_in_buffer, cmd);
+ gdbstub_use_prev_in_buf = strlen(remcom_in_buffer);
+ gdbstub_prev_in_buf_pos = 0;
+ return 0;
}
dbg_io_ops->write_char('+');
put_packet(remcom_out_buffer);
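get_packet() and the kdb transition check both rely on the gdb remote protocol framing: a packet is $<payload>#<two hex checksum digits>, the checksum being the payload bytes summed modulo 256. A small standalone validator of that wire format (independent of the kernel code):

#include <stdio.h>
#include <string.h>

/* Returns 1 if buf looks like a well-formed "$payload#XX" gdb remote packet. */
static int gdb_packet_ok(const char *buf)
{
        size_t len = strlen(buf);
        unsigned int sum = 0, want;

        if (len < 4 || buf[0] != '$' || buf[len - 3] != '#')
                return 0;
        for (size_t i = 1; i < len - 3; i++)
                sum = (sum + (unsigned char)buf[i]) & 0xff;
        if (sscanf(&buf[len - 2], "%2x", &want) != 1)
                return 0;
        return sum == want;
}

int main(void)
{
        /* "$qSupported#37" is the probe string the kdb code also watches for. */
        printf("%d\n", gdb_packet_ok("$qSupported#37"));
        printf("%d\n", gdb_packet_ok("$qSupported#00"));
        return 0;
}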
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
index 2f62fe85f16..7179eac7b41 100644
--- a/kernel/debug/kdb/kdb_bt.c
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -112,9 +112,8 @@ kdb_bt(int argc, const char **argv)
unsigned long addr;
long offset;
- kdbgetintenv("BTARGS", &argcount); /* Arguments to print */
- kdbgetintenv("BTAPROMPT", &btaprompt); /* Prompt after each
- * proc in bta */
+ /* Prompt after each proc in bta */
+ kdbgetintenv("BTAPROMPT", &btaprompt);
if (strcmp(argv[0], "bta") == 0) {
struct task_struct *g, *p;
diff --git a/kernel/debug/kdb/kdb_cmds b/kernel/debug/kdb/kdb_cmds
index 56c88e4db30..9834ad303ab 100644
--- a/kernel/debug/kdb/kdb_cmds
+++ b/kernel/debug/kdb/kdb_cmds
@@ -18,16 +18,12 @@ defcmd dumpcommon "" "Common kdb debugging"
endefcmd
defcmd dumpall "" "First line debugging"
- set BTSYMARG 1
- set BTARGS 9
pid R
-dumpcommon
-bta
endefcmd
defcmd dumpcpu "" "Same as dumpall but only tasks on cpus"
- set BTSYMARG 1
- set BTARGS 9
pid R
-dumpcommon
-btc
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index dd0b1b7dd02..d9ca9aa481e 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -30,6 +30,8 @@ EXPORT_SYMBOL_GPL(kdb_poll_funcs);
int kdb_poll_idx = 1;
EXPORT_SYMBOL_GPL(kdb_poll_idx);
+static struct kgdb_state *kdb_ks;
+
int kdb_stub(struct kgdb_state *ks)
{
int error = 0;
@@ -39,6 +41,7 @@ int kdb_stub(struct kgdb_state *ks)
kdb_dbtrap_t db_result = KDB_DB_NOBPT;
int i;
+ kdb_ks = ks;
if (KDB_STATE(REENTRY)) {
reason = KDB_REASON_SWITCH;
KDB_STATE_CLEAR(REENTRY);
@@ -123,20 +126,8 @@ int kdb_stub(struct kgdb_state *ks)
KDB_STATE_CLEAR(PAGER);
kdbnearsym_cleanup();
if (error == KDB_CMD_KGDB) {
- if (KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)) {
- /*
- * This inteface glue which allows kdb to transition in into
- * the gdb stub. In order to do this the '?' or '' gdb serial
- * packet response is processed here. And then control is
- * passed to the gdbstub.
- */
- if (KDB_STATE(DOING_KGDB))
- gdbstub_state(ks, "?");
- else
- gdbstub_state(ks, "");
+ if (KDB_STATE(DOING_KGDB))
KDB_STATE_CLEAR(DOING_KGDB);
- KDB_STATE_CLEAR(DOING_KGDB2);
- }
return DBG_PASS_EVENT;
}
kdb_bp_install(ks->linux_regs);
@@ -166,3 +157,7 @@ int kdb_stub(struct kgdb_state *ks)
return kgdb_info[ks->cpu].ret_state;
}
+void kdb_gdb_state_pass(char *buf)
+{
+ gdbstub_state(kdb_ks, buf);
+}
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 96fdaac46a8..4802eb5840e 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -31,15 +31,21 @@ char kdb_prompt_str[CMD_BUFLEN];
int kdb_trap_printk;
-static void kgdb_transition_check(char *buffer)
+static int kgdb_transition_check(char *buffer)
{
- int slen = strlen(buffer);
- if (strncmp(buffer, "$?#3f", slen) != 0 &&
- strncmp(buffer, "$qSupported#37", slen) != 0 &&
- strncmp(buffer, "+$qSupported#37", slen) != 0) {
+ if (buffer[0] != '+' && buffer[0] != '$') {
KDB_STATE_SET(KGDB_TRANS);
kdb_printf("%s", buffer);
+ } else {
+ int slen = strlen(buffer);
+ if (slen > 3 && buffer[slen - 3] == '#') {
+ kdb_gdb_state_pass(buffer);
+ strcpy(buffer, "kgdb");
+ KDB_STATE_SET(DOING_KGDB);
+ return 1;
+ }
}
+ return 0;
}
static int kdb_read_get_key(char *buffer, size_t bufsize)
@@ -251,6 +257,10 @@ poll_again:
case 13: /* enter */
*lastchar++ = '\n';
*lastchar++ = '\0';
+ if (!KDB_STATE(KGDB_TRANS)) {
+ KDB_STATE_SET(KGDB_TRANS);
+ kdb_printf("%s", buffer);
+ }
kdb_printf("\n");
return buffer;
case 4: /* Del */
@@ -382,22 +392,26 @@ poll_again:
* printed characters if we think that
* kgdb is connecting, until the check
* fails */
- if (!KDB_STATE(KGDB_TRANS))
- kgdb_transition_check(buffer);
- else
+ if (!KDB_STATE(KGDB_TRANS)) {
+ if (kgdb_transition_check(buffer))
+ return buffer;
+ } else {
kdb_printf("%c", key);
+ }
}
/* Special escape to kgdb */
if (lastchar - buffer >= 5 &&
strcmp(lastchar - 5, "$?#3f") == 0) {
+ kdb_gdb_state_pass(lastchar - 5);
strcpy(buffer, "kgdb");
KDB_STATE_SET(DOING_KGDB);
return buffer;
}
- if (lastchar - buffer >= 14 &&
- strcmp(lastchar - 14, "$qSupported#37") == 0) {
+ if (lastchar - buffer >= 11 &&
+ strcmp(lastchar - 11, "$qSupported") == 0) {
+ kdb_gdb_state_pass(lastchar - 11);
strcpy(buffer, "kgdb");
- KDB_STATE_SET(DOING_KGDB2);
+ KDB_STATE_SET(DOING_KGDB);
return buffer;
}
}
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index be14779bcef..63786e71a3c 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -145,7 +145,6 @@ static char *__env[] = {
#endif
"RADIX=16",
"MDCOUNT=8", /* lines of md output */
- "BTARGS=9", /* 9 possible args in bt */
KDB_PLATFORM_ENV,
"DTABCOUNT=30",
"NOSECT=1",
@@ -172,6 +171,7 @@ static char *__env[] = {
(char *)0,
(char *)0,
(char *)0,
+ (char *)0,
};
static const int __nenv = (sizeof(__env) / sizeof(char *));
@@ -1386,7 +1386,7 @@ int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
}
if (result == KDB_CMD_KGDB) {
- if (!(KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)))
+ if (!KDB_STATE(DOING_KGDB))
kdb_printf("Entering please attach debugger "
"or use $D#44+ or $3#33\n");
break;
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 35d69ed1dfb..e381d105b40 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -21,7 +21,6 @@
#define KDB_CMD_SS (-1003)
#define KDB_CMD_SSB (-1004)
#define KDB_CMD_KGDB (-1005)
-#define KDB_CMD_KGDB2 (-1006)
/* Internal debug flags */
#define KDB_DEBUG_FLAG_BP 0x0002 /* Breakpoint subsystem debug */
@@ -146,7 +145,6 @@ extern int kdb_state;
* keyboard on this cpu */
#define KDB_STATE_KEXEC 0x00040000 /* kexec issued */
#define KDB_STATE_DOING_KGDB 0x00080000 /* kgdb enter now issued */
-#define KDB_STATE_DOING_KGDB2 0x00100000 /* kgdb enter now issued */
#define KDB_STATE_KGDB_TRANS 0x00200000 /* Transition to kgdb */
#define KDB_STATE_ARCH 0xff000000 /* Reserved for arch
* specific use */
@@ -218,6 +216,7 @@ extern void kdb_print_nameval(const char *name, unsigned long val);
extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
extern void kdb_meminfo_proc_show(void);
extern char *kdb_getstr(char *, size_t, char *);
+extern void kdb_gdb_state_pass(char *buf);
/* Defines for kdb_symbol_print */
#define KDB_SP_SPACEB 0x0001 /* Space before string */
diff --git a/kernel/futex.c b/kernel/futex.c
index 0a308970c24..11cbe052b2e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -218,6 +218,8 @@ static void drop_futex_key_refs(union futex_key *key)
* @uaddr: virtual address of the futex
* @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
+ * @rw: mapping needs to be read/write (values: VERIFY_READ,
+ * VERIFY_WRITE)
*
* Returns a negative error code or 0
* The key words are stored in *key on success.
@@ -229,12 +231,12 @@ static void drop_futex_key_refs(union futex_key *key)
* lock_page() might sleep, the caller should not hold a spinlock.
*/
static int
-get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct page *page, *page_head;
- int err;
+ int err, ro = 0;
/*
* The futex address must be "naturally" aligned.
@@ -262,8 +264,18 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
again:
err = get_user_pages_fast(address, 1, 1, &page);
+ /*
+ * If write access is not required (e.g. FUTEX_WAIT), try
+ * to get read-only access.
+ */
+ if (err == -EFAULT && rw == VERIFY_READ) {
+ err = get_user_pages_fast(address, 1, 0, &page);
+ ro = 1;
+ }
if (err < 0)
return err;
+ else
+ err = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
page_head = page;
@@ -305,6 +317,13 @@ again:
if (!page_head->mapping) {
unlock_page(page_head);
put_page(page_head);
+ /*
+ * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
+ * trying to find one. RW mapping would have COW'd (and thus
+ * have a mapping) so this page is RO and won't ever change.
+ */
+ if ((page_head == ZERO_PAGE(address)))
+ return -EFAULT;
goto again;
}
@@ -316,6 +335,15 @@ again:
* the object not the particular process.
*/
if (PageAnon(page_head)) {
+ /*
+ * A RO anonymous page will never change and thus doesn't make
+ * sense for futex operations.
+ */
+ if (ro) {
+ err = -EFAULT;
+ goto out;
+ }
+
key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
@@ -327,9 +355,10 @@ again:
get_futex_key_refs(key);
+out:
unlock_page(page_head);
put_page(page_head);
- return 0;
+ return err;
}
static inline void put_futex_key(union futex_key *key)
@@ -940,7 +969,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
if (!bitset)
return -EINVAL;
- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
+ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
@@ -986,10 +1015,10 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
int ret, op_ret;
retry:
- ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
+ ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out_put_key1;
@@ -1243,10 +1272,11 @@ retry:
pi_state = NULL;
}
- ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
+ ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
+ requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
@@ -1790,7 +1820,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
* while the syscall executes.
*/
retry:
- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key);
+ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
if (unlikely(ret != 0))
return ret;
@@ -1941,7 +1971,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
}
retry:
- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key);
+ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
@@ -2060,7 +2090,7 @@ retry:
if ((uval & FUTEX_TID_MASK) != vpid)
return -EPERM;
- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
+ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
@@ -2249,7 +2279,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
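get_futex_key() now tries a writable pin first and, when the caller only needs read access (rw == VERIFY_READ), retries with a read-only pin on -EFAULT, recording that in ro so read-only anonymous pages can be rejected later. A sketch of that fallback shape, with a stub standing in for get_user_pages_fast():

#include <errno.h>
#include <stdio.h>

#define VERIFY_READ  0
#define VERIFY_WRITE 1

/* Stub standing in for the page pin: only read-only pins succeed here. */
static int pin_page(int writable)
{
        return writable ? -EFAULT : 0;
}

static int get_key(int rw, int *ro)
{
        int err = pin_page(1);          /* try a writable pin first */

        *ro = 0;
        if (err == -EFAULT && rw == VERIFY_READ) {
                err = pin_page(0);      /* read-only fallback for read-only ops */
                *ro = 1;
        }
        return err;
}

int main(void)
{
        int ro;

        printf("VERIFY_READ:  err=%d ro=%d\n", get_key(VERIFY_READ, &ro), ro);
        printf("VERIFY_WRITE: err=%d ro=%d\n", get_key(VERIFY_WRITE, &ro), ro);
        return 0;
}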
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 47613dfb7b2..ddc7644c130 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -274,7 +274,7 @@ static void __call_usermodehelper(struct work_struct *work)
* (used for preventing user land processes from being created after the user
* land has been frozen during a system-wide hibernation or suspend operation).
*/
-static int usermodehelper_disabled;
+static int usermodehelper_disabled = 1;
/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);
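Together with the init/main.c hunk earlier, initializing usermodehelper_disabled to 1 means helper invocations are refused until boot reaches the point where usermodehelper_enable() is called. The pattern is a default-off gate; a trivial sketch:

#include <stdio.h>

static int helpers_disabled = 1;        /* default off, like usermodehelper_disabled = 1 */

static void helpers_enable(void)
{
        helpers_disabled = 0;
}

static int call_helper(const char *name)
{
        if (helpers_disabled) {
                printf("%s: refused, helpers not enabled yet\n", name);
                return -1;
        }
        printf("%s: run\n", name);
        return 0;
}

int main(void)
{
        call_helper("early");   /* too early: still gated off */
        helpers_enable();       /* the point in boot where userspace is usable */
        call_helper("late");
        return 0;
}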
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3956f5149e2..8c24294e477 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2468,7 +2468,7 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
BUG_ON(usage_bit >= LOCK_USAGE_STATES);
- if (hlock_class(hlock)->key == &__lockdep_no_validate__)
+ if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
continue;
if (!mark_lock(curr, hlock, usage_bit))
@@ -2485,23 +2485,9 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
{
struct task_struct *curr = current;
- if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
- return;
-
- if (unlikely(curr->hardirqs_enabled)) {
- /*
- * Neither irq nor preemption are disabled here
- * so this is racy by nature but losing one hit
- * in a stat is not a big deal.
- */
- __debug_atomic_inc(redundant_hardirqs_on);
- return;
- }
/* we'll do an OFF -> ON transition: */
curr->hardirqs_enabled = 1;
- if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
- return;
/*
* We are going to turn hardirqs on, so set the
* usage bit for all held locks:
@@ -2529,9 +2515,25 @@ void trace_hardirqs_on_caller(unsigned long ip)
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
+ if (unlikely(current->hardirqs_enabled)) {
+ /*
+ * Neither irq nor preemption are disabled here
+ * so this is racy by nature but losing one hit
+ * in a stat is not a big deal.
+ */
+ __debug_atomic_inc(redundant_hardirqs_on);
+ return;
+ }
+
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
+ if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
+ return;
+
+ if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
+ return;
+
current->lockdep_recursion = 1;
__trace_hardirqs_on_caller(ip);
current->lockdep_recursion = 0;
@@ -2872,10 +2874,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
- int i;
-
- for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
- lock->class_cache[i] = NULL;
+ memset(lock, 0, sizeof(*lock));
#ifdef CONFIG_LOCK_STAT
lock->cpu = raw_smp_processor_id();
diff --git a/kernel/resource.c b/kernel/resource.c
index 3ff40178dce..3b3cedc5259 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -553,6 +553,27 @@ int allocate_resource(struct resource *root, struct resource *new,
EXPORT_SYMBOL(allocate_resource);
+/**
+ * lookup_resource - find an existing resource by a resource start address
+ * @root: root resource descriptor
+ * @start: resource start address
+ *
+ * Returns a pointer to the resource if found, NULL otherwise
+ */
+struct resource *lookup_resource(struct resource *root, resource_size_t start)
+{
+ struct resource *res;
+
+ read_lock(&resource_lock);
+ for (res = root->child; res; res = res->sibling) {
+ if (res->start == start)
+ break;
+ }
+ read_unlock(&resource_lock);
+
+ return res;
+}
+
/*
* Insert a resource into the resource tree. If successful, return NULL,
* otherwise return the conflicting resource (compare to __request_resource())
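lookup_resource() is a linear walk of the root's child list under the resource read lock, matching on the start address. The same search, stripped of locking, in a standalone sketch:

#include <stdio.h>

struct res {
        unsigned long start;
        struct res *sibling;
};

static struct res *lookup_res(struct res *root_child, unsigned long start)
{
        struct res *r;

        for (r = root_child; r; r = r->sibling) {
                if (r->start == start)
                        break;
        }
        return r;       /* NULL when nothing matched */
}

int main(void)
{
        struct res c = { .start = 0x300, .sibling = NULL };
        struct res b = { .start = 0x200, .sibling = &c };
        struct res a = { .start = 0x100, .sibling = &b };

        printf("found 0x200: %s\n", lookup_res(&a, 0x200) ? "yes" : "no");
        printf("found 0x400: %s\n", lookup_res(&a, 0x400) ? "yes" : "no");
        return 0;
}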
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index d1db2880d1c..e19ce1454ee 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -291,30 +291,28 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
if (!cpumask_subset(mask, cpu_possible_mask))
return -EINVAL;
- s = NULL;
if (isadd == REGISTER) {
for_each_cpu(cpu, mask) {
- if (!s)
- s = kmalloc_node(sizeof(struct listener),
- GFP_KERNEL, cpu_to_node(cpu));
+ s = kmalloc_node(sizeof(struct listener),
+ GFP_KERNEL, cpu_to_node(cpu));
if (!s)
goto cleanup;
+
s->pid = pid;
- INIT_LIST_HEAD(&s->list);
s->valid = 1;
listeners = &per_cpu(listener_array, cpu);
down_write(&listeners->sem);
- list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
- if (s2->pid == pid)
- goto next_cpu;
+ list_for_each_entry(s2, &listeners->list, list) {
+ if (s2->pid == pid && s2->valid)
+ goto exists;
}
list_add(&s->list, &listeners->list);
s = NULL;
-next_cpu:
+exists:
up_write(&listeners->sem);
+ kfree(s); /* nop if NULL */
}
- kfree(s);
return 0;
}
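The reworked add_del_listener() allocates a listener per CPU inside the loop and checks for an existing valid listener with the same pid before inserting; when one exists, the fresh allocation is simply freed (kfree of NULL being a harmless no-op keeps the exit path uniform). A userspace sketch of that insert-if-absent pattern:

#include <stdio.h>
#include <stdlib.h>

struct listener {
        int pid;
        struct listener *next;
};

static struct listener *head;

static void add_listener(int pid)
{
        struct listener *s = malloc(sizeof(*s));

        if (!s)
                return;
        s->pid = pid;

        for (struct listener *s2 = head; s2; s2 = s2->next) {
                if (s2->pid == pid) {   /* already registered */
                        free(s);        /* drop the unused allocation */
                        return;
                }
        }
        s->next = head;
        head = s;
}

int main(void)
{
        add_listener(42);
        add_listener(42);       /* duplicate: freed, not inserted */

        int count = 0;
        for (struct listener *s = head; s; s = s->next)
                count++;
        printf("listeners: %d\n", count);
        return 0;
}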
diff --git a/lib/Kconfig b/lib/Kconfig
index 32f3e5ae2be..6c695ff9cab 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -276,4 +276,7 @@ config CORDIC
so its calculations are in fixed point. Modules can select this
when they require this function. Module will be called cordic.
+config LLIST
+ bool
+
endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 892f4e282ea..d5d175c8a6c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -10,7 +10,7 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o timerqueue.o\
idr.o int_sqrt.o extable.o prio_tree.o \
- sha1.o irq_regs.o reciprocal_div.o argv_split.o \
+ sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o prio_heap.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o find_next_bit.o
@@ -115,6 +115,8 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
obj-$(CONFIG_CORDIC) += cordic.o
+obj-$(CONFIG_LLIST) += llist.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 37ef4b04879..2f4412e4d07 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,8 +271,6 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-
void bitmap_set(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 2577b121c7c..f193b779644 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -197,21 +197,15 @@ static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
}
-void cleanup_fault_attr_dentries(struct fault_attr *attr)
-{
- debugfs_remove_recursive(attr->dir);
-}
-
-int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
+struct dentry *fault_create_debugfs_attr(const char *name,
+ struct dentry *parent, struct fault_attr *attr)
{
mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
- dir = debugfs_create_dir(name, NULL);
+ dir = debugfs_create_dir(name, parent);
if (!dir)
- return -ENOMEM;
-
- attr->dir = dir;
+ return ERR_PTR(-ENOMEM);
if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
goto fail;
@@ -243,11 +237,11 @@ int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
- return 0;
+ return dir;
fail:
- debugfs_remove_recursive(attr->dir);
+ debugfs_remove_recursive(dir);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 577ddf80597..f352cc42f4f 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -1,8 +1,26 @@
/*
- * Basic general purpose allocator for managing special purpose memory
- * not managed by the regular kmalloc/kfree interface.
- * Uses for this includes on-device special memory, uncached memory
- * etc.
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this includes on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool, a lock still has
+ * to be taken. So any user relying on locklessness has to ensure
+ * that sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have an NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in NMI handlers. So code that uses
+ * the allocator in an NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
*
* Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
*
@@ -13,8 +31,109 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
#include <linux/genalloc.h>
+static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
+{
+ unsigned long val, nval;
+
+ nval = *addr;
+ do {
+ val = nval;
+ if (val & mask_to_set)
+ return -EBUSY;
+ cpu_relax();
+ } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
+
+ return 0;
+}
+
+static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
+{
+ unsigned long val, nval;
+
+ nval = *addr;
+ do {
+ val = nval;
+ if ((val & mask_to_clear) != mask_to_clear)
+ return -EBUSY;
+ cpu_relax();
+ } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
+
+ return 0;
+}
+
+/*
+ * bitmap_set_ll - set the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Set @nr bits starting from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without a lock. If two
+ * users set the same bit, one of them will return the remaining bits,
+ * otherwise 0 is returned.
+ */
+static int bitmap_set_ll(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_set >= 0) {
+ if (set_bits_ll(p, mask_to_set))
+ return nr;
+ nr -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ if (set_bits_ll(p, mask_to_set))
+ return nr;
+ }
+
+ return 0;
+}
+
+/*
+ * bitmap_clear_ll - clear the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to clear
+ *
+ * Clear @nr bits starting from @start in @map lock-lessly. Several
+ * users can set/clear the same bitmap simultaneously without a lock.
+ * If two users clear the same bit, one of them will see the conflict
+ * and return the number of bits still to be cleared; otherwise 0 is
+ * returned.
+ */
+static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_clear >= 0) {
+ if (clear_bits_ll(p, mask_to_clear))
+ return nr;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ if (clear_bits_ll(p, mask_to_clear))
+ return nr;
+ }
+
+ return 0;
+}
/**
* gen_pool_create - create a new special memory pool
@@ -30,7 +149,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
if (pool != NULL) {
- rwlock_init(&pool->lock);
+ spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->chunks);
pool->min_alloc_order = min_alloc_order;
}
@@ -63,14 +182,14 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
if (unlikely(chunk == NULL))
return -ENOMEM;
- spin_lock_init(&chunk->lock);
chunk->phys_addr = phys;
chunk->start_addr = virt;
chunk->end_addr = virt + size;
+ atomic_set(&chunk->avail, size);
- write_lock(&pool->lock);
- list_add(&chunk->next_chunk, &pool->chunks);
- write_unlock(&pool->lock);
+ spin_lock(&pool->lock);
+ list_add_rcu(&chunk->next_chunk, &pool->chunks);
+ spin_unlock(&pool->lock);
return 0;
}
@@ -85,19 +204,19 @@ EXPORT_SYMBOL(gen_pool_add_virt);
*/
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
- struct list_head *_chunk;
struct gen_pool_chunk *chunk;
+ phys_addr_t paddr = -1;
- read_lock(&pool->lock);
- list_for_each(_chunk, &pool->chunks) {
- chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
-
- if (addr >= chunk->start_addr && addr < chunk->end_addr)
- return chunk->phys_addr + addr - chunk->start_addr;
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+ if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+ paddr = chunk->phys_addr + (addr - chunk->start_addr);
+ break;
+ }
}
- read_unlock(&pool->lock);
+ rcu_read_unlock();
- return -1;
+ return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
@@ -115,7 +234,6 @@ void gen_pool_destroy(struct gen_pool *pool)
int order = pool->min_alloc_order;
int bit, end_bit;
-
list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
list_del(&chunk->next_chunk);
@@ -137,44 +255,50 @@ EXPORT_SYMBOL(gen_pool_destroy);
* @size: number of bytes to allocate from the pool
*
* Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm.
+ * Uses a first-fit algorithm. Cannot be used in an NMI handler on
+ * architectures without an NMI-safe cmpxchg implementation.
*/
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
- struct list_head *_chunk;
struct gen_pool_chunk *chunk;
- unsigned long addr, flags;
+ unsigned long addr = 0;
int order = pool->min_alloc_order;
- int nbits, start_bit, end_bit;
+ int nbits, start_bit = 0, end_bit, remain;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
if (size == 0)
return 0;
nbits = (size + (1UL << order) - 1) >> order;
-
- read_lock(&pool->lock);
- list_for_each(_chunk, &pool->chunks) {
- chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+ if (size > atomic_read(&chunk->avail))
+ continue;
end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-
- spin_lock_irqsave(&chunk->lock, flags);
- start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
- nbits, 0);
- if (start_bit >= end_bit) {
- spin_unlock_irqrestore(&chunk->lock, flags);
+retry:
+ start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
+ start_bit, nbits, 0);
+ if (start_bit >= end_bit)
continue;
+ remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
+ if (remain) {
+ remain = bitmap_clear_ll(chunk->bits, start_bit,
+ nbits - remain);
+ BUG_ON(remain);
+ goto retry;
}
addr = chunk->start_addr + ((unsigned long)start_bit << order);
-
- bitmap_set(chunk->bits, start_bit, nbits);
- spin_unlock_irqrestore(&chunk->lock, flags);
- read_unlock(&pool->lock);
- return addr;
+ size = nbits << order;
+ atomic_sub(size, &chunk->avail);
+ break;
}
- read_unlock(&pool->lock);
- return 0;
+ rcu_read_unlock();
+ return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
@@ -184,33 +308,95 @@ EXPORT_SYMBOL(gen_pool_alloc);
* @addr: starting address of memory to free back to pool
* @size: size in bytes of memory to free
*
- * Free previously allocated special memory back to the specified pool.
+ * Free previously allocated special memory back to the specified
+ * pool. Cannot be used in an NMI handler on architectures without
+ * an NMI-safe cmpxchg implementation.
*/
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
- struct list_head *_chunk;
struct gen_pool_chunk *chunk;
- unsigned long flags;
int order = pool->min_alloc_order;
- int bit, nbits;
+ int start_bit, nbits, remain;
- nbits = (size + (1UL << order) - 1) >> order;
-
- read_lock(&pool->lock);
- list_for_each(_chunk, &pool->chunks) {
- chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+ nbits = (size + (1UL << order) - 1) >> order;
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
if (addr >= chunk->start_addr && addr < chunk->end_addr) {
BUG_ON(addr + size > chunk->end_addr);
- spin_lock_irqsave(&chunk->lock, flags);
- bit = (addr - chunk->start_addr) >> order;
- while (nbits--)
- __clear_bit(bit++, chunk->bits);
- spin_unlock_irqrestore(&chunk->lock, flags);
- break;
+ start_bit = (addr - chunk->start_addr) >> order;
+ remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
+ BUG_ON(remain);
+ size = nbits << order;
+ atomic_add(size, &chunk->avail);
+ rcu_read_unlock();
+ return;
}
}
- BUG_ON(nbits > 0);
- read_unlock(&pool->lock);
+ rcu_read_unlock();
+ BUG();
}
EXPORT_SYMBOL(gen_pool_free);
+
+/**
+ * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
+ * @pool: the generic memory pool
+ * @func: func to call
+ * @data: additional data used by @func
+ *
+ * Call @func for every chunk of the generic memory pool. @func is
+ * called with rcu_read_lock() held.
+ */
+void gen_pool_for_each_chunk(struct gen_pool *pool,
+ void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
+ void *data)
+{
+ struct gen_pool_chunk *chunk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
+ func(pool, chunk, data);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_for_each_chunk);
+
+/**
+ * gen_pool_avail - get available free space of the pool
+ * @pool: pool to get available free space
+ *
+ * Return available free space of the specified pool.
+ */
+size_t gen_pool_avail(struct gen_pool *pool)
+{
+ struct gen_pool_chunk *chunk;
+ size_t avail = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+ avail += atomic_read(&chunk->avail);
+ rcu_read_unlock();
+ return avail;
+}
+EXPORT_SYMBOL_GPL(gen_pool_avail);
+
+/**
+ * gen_pool_size - get size in bytes of memory managed by the pool
+ * @pool: pool to get size
+ *
+ * Return size in bytes of memory managed by the pool.
+ */
+size_t gen_pool_size(struct gen_pool *pool)
+{
+ struct gen_pool_chunk *chunk;
+ size_t size = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+ size += chunk->end_addr - chunk->start_addr;
+ rcu_read_unlock();
+ return size;
+}
+EXPORT_SYMBOL_GPL(gen_pool_size);
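[Editorial sketch, not part of the patch; my_pool, my_region and MY_REGION_SIZE are hypothetical.] Pool setup still takes the pool spinlock, so chunks are added up front from process context; once populated, gen_pool_alloc()/gen_pool_free() only use cmpxchg and can run from atomic context (and from NMI context on architectures selecting ARCH_HAVE_NMI_SAFE_CMPXCHG).

#include <linux/genalloc.h>

static struct gen_pool *my_pool;	/* hypothetical driver pool */

static int __init my_pool_init(void)
{
	my_pool = gen_pool_create(6, -1);	/* 64-byte granules, any node */
	if (!my_pool)
		return -ENOMEM;

	/* Adding memory still takes pool->lock: do it up front. */
	if (gen_pool_add(my_pool, (unsigned long)my_region,
			 MY_REGION_SIZE, -1)) {
		gen_pool_destroy(my_pool);
		return -ENOMEM;
	}
	return 0;
}

/* Safe from IRQ context once the pool is populated; returns 0 on failure. */
static unsigned long my_alloc(size_t size)
{
	return gen_pool_alloc(my_pool, size);
}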
diff --git a/lib/idr.c b/lib/idr.c
index e15502e8b21..db040ce3fa7 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -34,8 +34,10 @@
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
+#include <linux/spinlock.h>
static struct kmem_cache *idr_layer_cache;
+static DEFINE_SPINLOCK(simple_ida_lock);
static struct idr_layer *get_from_free_list(struct idr *idp)
{
@@ -926,6 +928,71 @@ void ida_destroy(struct ida *ida)
EXPORT_SYMBOL(ida_destroy);
/**
+ * ida_simple_get - get a new id.
+ * @ida: the (initialized) ida.
+ * @start: the minimum id (inclusive, < 0x80000000)
+ * @end: the maximum id (exclusive, < 0x80000000 or 0 for no limit)
+ * @gfp_mask: memory allocation flags
+ *
+ * Allocates an id in the range start <= id < end, or returns -ENOSPC.
+ * On memory allocation failure, returns -ENOMEM.
+ *
+ * Use ida_simple_remove() to get rid of an id.
+ */
+int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
+ gfp_t gfp_mask)
+{
+ int ret, id;
+ unsigned int max;
+
+ BUG_ON((int)start < 0);
+ BUG_ON((int)end < 0);
+
+ if (end == 0)
+ max = 0x80000000;
+ else {
+ BUG_ON(end < start);
+ max = end - 1;
+ }
+
+again:
+ if (!ida_pre_get(ida, gfp_mask))
+ return -ENOMEM;
+
+ spin_lock(&simple_ida_lock);
+ ret = ida_get_new_above(ida, start, &id);
+ if (!ret) {
+ if (id > max) {
+ ida_remove(ida, id);
+ ret = -ENOSPC;
+ } else {
+ ret = id;
+ }
+ }
+ spin_unlock(&simple_ida_lock);
+
+ if (unlikely(ret == -EAGAIN))
+ goto again;
+
+ return ret;
+}
+EXPORT_SYMBOL(ida_simple_get);
+
+/**
+ * ida_simple_remove - remove an allocated id.
+ * @ida: the (initialized) ida.
+ * @id: the id returned by ida_simple_get.
+ */
+void ida_simple_remove(struct ida *ida, unsigned int id)
+{
+ BUG_ON((int)id < 0);
+ spin_lock(&simple_ida_lock);
+ ida_remove(ida, id);
+ spin_unlock(&simple_ida_lock);
+}
+EXPORT_SYMBOL(ida_simple_remove);
+
+/**
* ida_init - initialize ida handle
* @ida: ida handle
*
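[Editorial sketch, not part of the patch; my_ida, my_probe and my_remove are hypothetical.] The spinlock and the retry-on--EAGAIN loop shown above are hidden inside the helpers, so a driver just treats a negative return as failure.

#include <linux/idr.h>

static DEFINE_IDA(my_ida);		/* hypothetical per-driver ida */

static int my_probe(void)
{
	/* end == 0 means "no upper bound" */
	int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);

	if (id < 0)
		return id;		/* -ENOMEM or -ENOSPC */
	/* ... use id as the device instance number ... */
	return 0;
}

static void my_remove(int id)
{
	ida_simple_remove(&my_ida, id);
}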
diff --git a/lib/llist.c b/lib/llist.c
new file mode 100644
index 00000000000..da445724fa1
--- /dev/null
+++ b/lib/llist.c
@@ -0,0 +1,129 @@
+/*
+ * Lock-less NULL terminated single linked list
+ *
+ * The basic atomic operation of this list is cmpxchg on long. On
+ * architectures that don't have an NMI-safe cmpxchg implementation,
+ * the list can NOT be used in an NMI handler. So code that uses the
+ * list in an NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * Copyright 2010,2011 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/llist.h>
+
+#include <asm/system.h>
+
+/**
+ * llist_add - add a new entry
+ * @new: new entry to be added
+ * @head: the head for your lock-less list
+ */
+void llist_add(struct llist_node *new, struct llist_head *head)
+{
+ struct llist_node *entry, *old_entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ entry = head->first;
+ do {
+ old_entry = entry;
+ new->next = entry;
+ cpu_relax();
+ } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
+}
+EXPORT_SYMBOL_GPL(llist_add);
+
+/**
+ * llist_add_batch - add several linked entries in batch
+ * @new_first: first entry in batch to be added
+ * @new_last: last entry in batch to be added
+ * @head: the head for your lock-less list
+ */
+void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+ struct llist_head *head)
+{
+ struct llist_node *entry, *old_entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ entry = head->first;
+ do {
+ old_entry = entry;
+ new_last->next = entry;
+ cpu_relax();
+ } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
+}
+EXPORT_SYMBOL_GPL(llist_add_batch);
+
+/**
+ * llist_del_first - delete the first entry of lock-less list
+ * @head: the head for your lock-less list
+ *
+ * If the list is empty, return NULL; otherwise return the entry
+ * deleted, which is the most recently added one.
+ *
+ * Only a single llist_del_first user may run concurrently with
+ * multiple llist_add users without a lock, because otherwise an
+ * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add,
+ * llist_add) sequence in another user may change @head->first->next
+ * while keeping @head->first unchanged. If multiple consumers are
+ * needed, use llist_del_all or a lock between the consumers.
+ */
+struct llist_node *llist_del_first(struct llist_head *head)
+{
+ struct llist_node *entry, *old_entry, *next;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ entry = head->first;
+ do {
+ if (entry == NULL)
+ return NULL;
+ old_entry = entry;
+ next = entry->next;
+ cpu_relax();
+ } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(llist_del_first);
+
+/**
+ * llist_del_all - delete all entries from lock-less list
+ * @head: the head of lock-less list to delete all entries
+ *
+ * If the list is empty, return NULL; otherwise delete all entries and
+ * return a pointer to the first one. The entries are returned in
+ * order from the newest to the oldest added.
+ */
+struct llist_node *llist_del_all(struct llist_head *head)
+{
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ return xchg(&head->first, NULL);
+}
+EXPORT_SYMBOL_GPL(llist_del_all);
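[Editorial sketch, not part of the patch; struct my_event and the handler names are hypothetical, and LLIST_HEAD()/llist_entry() are assumed to come from <linux/llist.h>.] This is the one-consumer pattern the comments above describe: producers may run in IRQ/NMI context, while a single consumer drains the whole list at once with llist_del_all() and walks the newest-first chain.

#include <linux/llist.h>
#include <linux/slab.h>

struct my_event {			/* hypothetical payload */
	int code;
	struct llist_node node;
};

static LLIST_HEAD(my_events);

/* Producer: safe from IRQ/NMI context (given NMI-safe cmpxchg). */
static void my_post_event(struct my_event *ev)
{
	llist_add(&ev->node, &my_events);
}

/* Single consumer, e.g. a workqueue function. */
static void my_drain_events(void)
{
	struct llist_node *n = llist_del_all(&my_events);

	while (n) {
		struct my_event *ev = llist_entry(n, struct my_event, node);

		n = n->next;		/* entries come back newest-first */
		/* ... handle ev->code ... */
		kfree(ev);
	}
}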
diff --git a/lib/md5.c b/lib/md5.c
new file mode 100644
index 00000000000..c777180e1f2
--- /dev/null
+++ b/lib/md5.c
@@ -0,0 +1,95 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cryptohash.h>
+
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+#define MD5STEP(f, w, x, y, z, in, s) \
+ (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
+
+void md5_transform(__u32 *hash, __u32 const *in)
+{
+ u32 a, b, c, d;
+
+ a = hash[0];
+ b = hash[1];
+ c = hash[2];
+ d = hash[3];
+
+ MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+ MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+ MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+ MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+ MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+ MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+ MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+ MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+ MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+ MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+ MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+ MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+ MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+ MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+ MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+ MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+ MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+ MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+ MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+ MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+ MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+ MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+ MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+ MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+ MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+ MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+ MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+ MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+ MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+ MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+ MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+ MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+ MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+ MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+ MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+ MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+ MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+ MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+ MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+ MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+ MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+ MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+ MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+ MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+ MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+ MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+ MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+ MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+ MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+ MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+ MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+ MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+ MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+ MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+ MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+ MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+ MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+ MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+ MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+ MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+ MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+ MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+ MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+ MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+ hash[0] += a;
+ hash[1] += b;
+ hash[2] += c;
+ hash[3] += d;
+}
+EXPORT_SYMBOL(md5_transform);
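[Editorial sketch, not part of the patch.] md5_transform() is only the compression function: it consumes exactly one 64-byte block and updates the 4-word chaining value in place, leaving padding, length encoding and any byte-swapping to the caller. my_md5_one_block is hypothetical, the initial values are the standard MD5 IV, and the prototype is assumed to be exported from <linux/cryptohash.h> by the header side of this change.

#include <linux/cryptohash.h>

/* Hash one pre-padded 64-byte block; block[] must already hold the
 * padded message as 16 little-endian 32-bit words. */
static void my_md5_one_block(__u32 digest[4], const __u32 block[16])
{
	digest[0] = 0x67452301;		/* standard MD5 initial value */
	digest[1] = 0xefcdab89;
	digest[2] = 0x98badcfe;
	digest[3] = 0x10325476;

	md5_transform(digest, block);
	/* digest[] is the chaining value; the final MD5 output is these
	 * four words emitted little-endian. */
}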
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7ea2e033d71..a2f9da59c19 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -823,8 +823,8 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
EXPORT_SYMBOL(radix_tree_prev_hole);
static unsigned int
-__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
- unsigned int max_items, unsigned long *next_index)
+__lookup(struct radix_tree_node *slot, void ***results, unsigned long *indices,
+ unsigned long index, unsigned int max_items, unsigned long *next_index)
{
unsigned int nr_found = 0;
unsigned int shift, height;
@@ -857,12 +857,16 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
/* Bottom level: grab some items */
for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
- index++;
if (slot->slots[i]) {
- results[nr_found++] = &(slot->slots[i]);
- if (nr_found == max_items)
+ results[nr_found] = &(slot->slots[i]);
+ if (indices)
+ indices[nr_found] = index;
+ if (++nr_found == max_items) {
+ index++;
goto out;
+ }
}
+ index++;
}
out:
*next_index = index;
@@ -918,8 +922,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
if (cur_index > max_index)
break;
- slots_found = __lookup(node, (void ***)results + ret, cur_index,
- max_items - ret, &next_index);
+ slots_found = __lookup(node, (void ***)results + ret, NULL,
+ cur_index, max_items - ret, &next_index);
nr_found = 0;
for (i = 0; i < slots_found; i++) {
struct radix_tree_node *slot;
@@ -944,6 +948,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
* radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
* @root: radix tree root
* @results: where the results of the lookup are placed
+ * @indices: where their indices should be placed (but usually NULL)
* @first_index: start the lookup from this key
* @max_items: place up to this many items at *results
*
@@ -958,7 +963,8 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
* protection, radix_tree_deref_slot may fail requiring a retry.
*/
unsigned int
-radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+radix_tree_gang_lookup_slot(struct radix_tree_root *root,
+ void ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items)
{
unsigned long max_index;
@@ -974,6 +980,8 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
if (first_index > 0)
return 0;
results[0] = (void **)&root->rnode;
+ if (indices)
+ indices[0] = 0;
return 1;
}
node = indirect_to_ptr(node);
@@ -987,8 +995,9 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
if (cur_index > max_index)
break;
- slots_found = __lookup(node, results + ret, cur_index,
- max_items - ret, &next_index);
+ slots_found = __lookup(node, results + ret,
+ indices ? indices + ret : NULL,
+ cur_index, max_items - ret, &next_index);
ret += slots_found;
if (next_index == 0)
break;
@@ -1194,6 +1203,98 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
+#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
+#include <linux/sched.h> /* for cond_resched() */
+
+/*
+ * This linear search is at present only useful to shmem_unuse_inode().
+ */
+static unsigned long __locate(struct radix_tree_node *slot, void *item,
+ unsigned long index, unsigned long *found_index)
+{
+ unsigned int shift, height;
+ unsigned long i;
+
+ height = slot->height;
+ shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+
+ for ( ; height > 1; height--) {
+ i = (index >> shift) & RADIX_TREE_MAP_MASK;
+ for (;;) {
+ if (slot->slots[i] != NULL)
+ break;
+ index &= ~((1UL << shift) - 1);
+ index += 1UL << shift;
+ if (index == 0)
+ goto out; /* 32-bit wraparound */
+ i++;
+ if (i == RADIX_TREE_MAP_SIZE)
+ goto out;
+ }
+
+ shift -= RADIX_TREE_MAP_SHIFT;
+ slot = rcu_dereference_raw(slot->slots[i]);
+ if (slot == NULL)
+ goto out;
+ }
+
+ /* Bottom level: check items */
+ for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
+ if (slot->slots[i] == item) {
+ *found_index = index + i;
+ index = 0;
+ goto out;
+ }
+ }
+ index += RADIX_TREE_MAP_SIZE;
+out:
+ return index;
+}
+
+/**
+ * radix_tree_locate_item - search through radix tree for item
+ * @root: radix tree root
+ * @item: item to be found
+ *
+ * Returns the index where @item was found, or -1 if not found.
+ * Caller must hold no lock (since this time-consuming function needs
+ * to be preemptible), and must check afterwards if item is still there.
+ */
+unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
+{
+ struct radix_tree_node *node;
+ unsigned long max_index;
+ unsigned long cur_index = 0;
+ unsigned long found_index = -1;
+
+ do {
+ rcu_read_lock();
+ node = rcu_dereference_raw(root->rnode);
+ if (!radix_tree_is_indirect_ptr(node)) {
+ rcu_read_unlock();
+ if (node == item)
+ found_index = 0;
+ break;
+ }
+
+ node = indirect_to_ptr(node);
+ max_index = radix_tree_maxindex(node->height);
+ if (cur_index > max_index)
+ break;
+
+ cur_index = __locate(node, item, cur_index, &found_index);
+ rcu_read_unlock();
+ cond_resched();
+ } while (cur_index != 0 && cur_index <= max_index);
+
+ return found_index;
+}
+#else
+unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
+{
+ return -1;
+}
+#endif /* CONFIG_SHMEM && CONFIG_SWAP */
/**
* radix_tree_shrink - shrink height of a radix tree to minimal
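[Editorial sketch, not part of the patch; my_dump_slots and the fixed batch of 16 are illustrative.] Existing callers of radix_tree_gang_lookup_slot() simply pass NULL for the new @indices argument, while shmem uses it to learn which offset each returned slot belongs to, as the shmem_find_get_pages_and_swap() hunk later in this diff shows.

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>

static void my_dump_slots(struct radix_tree_root *tree)
{
	void **slots[16];
	unsigned long indices[16];
	unsigned int found, i;

	rcu_read_lock();
	found = radix_tree_gang_lookup_slot(tree, slots, indices, 0, 16);
	for (i = 0; i < found; i++) {
		void *item = radix_tree_deref_slot(slots[i]);

		if (!item || radix_tree_deref_retry(item))
			continue;	/* raced with a concurrent update */
		pr_info("index %lu -> %p\n", indices[i], item);
	}
	rcu_read_unlock();
}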
diff --git a/lib/sha1.c b/lib/sha1.c
index 4c45fd50e91..f33271dd00c 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -1,31 +1,72 @@
/*
- * SHA transform algorithm, originally taken from code written by
- * Peter Gutmann, and placed in the public domain.
+ * SHA1 routine optimized to do word accesses rather than byte accesses,
+ * and to avoid unnecessary copies into the context array.
+ *
+ * This was based on the git SHA1 implementation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/cryptohash.h>
+#include <linux/bitops.h>
+#include <asm/unaligned.h>
-/* The SHA f()-functions. */
+/*
+ * If you have 32 registers or more, the compiler can (and should)
+ * try to change the array[] accesses into registers. However, on
+ * machines with fewer than ~25 registers, that won't really work,
+ * and at least gcc will make an unholy mess of it.
+ *
+ * So to avoid that mess which just slows things down, we force
+ * the stores to memory to actually happen (we might be better off
+ * with a 'W(t)=(val);asm("":"+m" (W(t)))' there instead, as
+ * suggested by Artur Skawina - that will also make gcc unable to
+ * try to do the silly "optimize away loads" part because it won't
+ * see what the value will be).
+ *
+ * Ben Herrenschmidt reports that on PPC, the C version comes close
+ * to the optimized asm with this (ie on PPC you don't want that
+ * 'volatile', since there are lots of registers).
+ *
+ * On ARM we get the best code generation by forcing a full memory barrier
+ * between each SHA_ROUND, otherwise gcc happily gets wild with spilling and
+ * the stack frame size simply explodes and performance goes down the drain.
+ */
-#define f1(x,y,z) (z ^ (x & (y ^ z))) /* x ? y : z */
-#define f2(x,y,z) (x ^ y ^ z) /* XOR */
-#define f3(x,y,z) ((x & y) + (z & (x ^ y))) /* majority */
+#ifdef CONFIG_X86
+ #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
+#elif defined(CONFIG_ARM)
+ #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
+#else
+ #define setW(x, val) (W(x) = (val))
+#endif
-/* The SHA Mysterious Constants */
+/* This "rolls" over the 512-bit array */
+#define W(x) (array[(x)&15])
-#define K1 0x5A827999L /* Rounds 0-19: sqrt(2) * 2^30 */
-#define K2 0x6ED9EBA1L /* Rounds 20-39: sqrt(3) * 2^30 */
-#define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */
-#define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */
+/*
+ * Where do we get the source from? The first 16 iterations get it from
+ * the input data, the next mix it from the 512-bit array.
+ */
+#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
+#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
+
+#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
+ __u32 TEMP = input(t); setW(t, TEMP); \
+ E += TEMP + rol32(A,5) + (fn) + (constant); \
+ B = ror32(B, 2); } while (0)
+
+#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
+#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
+#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
/**
* sha_transform - single block SHA1 transform
*
* @digest: 160 bit digest to update
* @data: 512 bits of data to hash
- * @W: 80 words of workspace (see note)
+ * @array: 16 words of workspace (see note)
*
* This function generates a SHA1 digest for a single 512-bit block.
* Be warned, it does not handle padding and message digest, do not
@@ -36,47 +77,111 @@
* to clear the workspace. This is left to the caller to avoid
* unnecessary clears between chained hashing operations.
*/
-void sha_transform(__u32 *digest, const char *in, __u32 *W)
+void sha_transform(__u32 *digest, const char *data, __u32 *array)
{
- __u32 a, b, c, d, e, t, i;
-
- for (i = 0; i < 16; i++)
- W[i] = be32_to_cpu(((const __be32 *)in)[i]);
-
- for (i = 0; i < 64; i++)
- W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1);
-
- a = digest[0];
- b = digest[1];
- c = digest[2];
- d = digest[3];
- e = digest[4];
-
- for (i = 0; i < 20; i++) {
- t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i];
- e = d; d = c; c = rol32(b, 30); b = a; a = t;
- }
-
- for (; i < 40; i ++) {
- t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i];
- e = d; d = c; c = rol32(b, 30); b = a; a = t;
- }
-
- for (; i < 60; i ++) {
- t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i];
- e = d; d = c; c = rol32(b, 30); b = a; a = t;
- }
-
- for (; i < 80; i ++) {
- t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i];
- e = d; d = c; c = rol32(b, 30); b = a; a = t;
- }
-
- digest[0] += a;
- digest[1] += b;
- digest[2] += c;
- digest[3] += d;
- digest[4] += e;
+ __u32 A, B, C, D, E;
+
+ A = digest[0];
+ B = digest[1];
+ C = digest[2];
+ D = digest[3];
+ E = digest[4];
+
+ /* Round 1 - iterations 0-15 take their input from 'data' */
+ T_0_15( 0, A, B, C, D, E);
+ T_0_15( 1, E, A, B, C, D);
+ T_0_15( 2, D, E, A, B, C);
+ T_0_15( 3, C, D, E, A, B);
+ T_0_15( 4, B, C, D, E, A);
+ T_0_15( 5, A, B, C, D, E);
+ T_0_15( 6, E, A, B, C, D);
+ T_0_15( 7, D, E, A, B, C);
+ T_0_15( 8, C, D, E, A, B);
+ T_0_15( 9, B, C, D, E, A);
+ T_0_15(10, A, B, C, D, E);
+ T_0_15(11, E, A, B, C, D);
+ T_0_15(12, D, E, A, B, C);
+ T_0_15(13, C, D, E, A, B);
+ T_0_15(14, B, C, D, E, A);
+ T_0_15(15, A, B, C, D, E);
+
+ /* Round 1 - tail. Input from 512-bit mixing array */
+ T_16_19(16, E, A, B, C, D);
+ T_16_19(17, D, E, A, B, C);
+ T_16_19(18, C, D, E, A, B);
+ T_16_19(19, B, C, D, E, A);
+
+ /* Round 2 */
+ T_20_39(20, A, B, C, D, E);
+ T_20_39(21, E, A, B, C, D);
+ T_20_39(22, D, E, A, B, C);
+ T_20_39(23, C, D, E, A, B);
+ T_20_39(24, B, C, D, E, A);
+ T_20_39(25, A, B, C, D, E);
+ T_20_39(26, E, A, B, C, D);
+ T_20_39(27, D, E, A, B, C);
+ T_20_39(28, C, D, E, A, B);
+ T_20_39(29, B, C, D, E, A);
+ T_20_39(30, A, B, C, D, E);
+ T_20_39(31, E, A, B, C, D);
+ T_20_39(32, D, E, A, B, C);
+ T_20_39(33, C, D, E, A, B);
+ T_20_39(34, B, C, D, E, A);
+ T_20_39(35, A, B, C, D, E);
+ T_20_39(36, E, A, B, C, D);
+ T_20_39(37, D, E, A, B, C);
+ T_20_39(38, C, D, E, A, B);
+ T_20_39(39, B, C, D, E, A);
+
+ /* Round 3 */
+ T_40_59(40, A, B, C, D, E);
+ T_40_59(41, E, A, B, C, D);
+ T_40_59(42, D, E, A, B, C);
+ T_40_59(43, C, D, E, A, B);
+ T_40_59(44, B, C, D, E, A);
+ T_40_59(45, A, B, C, D, E);
+ T_40_59(46, E, A, B, C, D);
+ T_40_59(47, D, E, A, B, C);
+ T_40_59(48, C, D, E, A, B);
+ T_40_59(49, B, C, D, E, A);
+ T_40_59(50, A, B, C, D, E);
+ T_40_59(51, E, A, B, C, D);
+ T_40_59(52, D, E, A, B, C);
+ T_40_59(53, C, D, E, A, B);
+ T_40_59(54, B, C, D, E, A);
+ T_40_59(55, A, B, C, D, E);
+ T_40_59(56, E, A, B, C, D);
+ T_40_59(57, D, E, A, B, C);
+ T_40_59(58, C, D, E, A, B);
+ T_40_59(59, B, C, D, E, A);
+
+ /* Round 4 */
+ T_60_79(60, A, B, C, D, E);
+ T_60_79(61, E, A, B, C, D);
+ T_60_79(62, D, E, A, B, C);
+ T_60_79(63, C, D, E, A, B);
+ T_60_79(64, B, C, D, E, A);
+ T_60_79(65, A, B, C, D, E);
+ T_60_79(66, E, A, B, C, D);
+ T_60_79(67, D, E, A, B, C);
+ T_60_79(68, C, D, E, A, B);
+ T_60_79(69, B, C, D, E, A);
+ T_60_79(70, A, B, C, D, E);
+ T_60_79(71, E, A, B, C, D);
+ T_60_79(72, D, E, A, B, C);
+ T_60_79(73, C, D, E, A, B);
+ T_60_79(74, B, C, D, E, A);
+ T_60_79(75, A, B, C, D, E);
+ T_60_79(76, E, A, B, C, D);
+ T_60_79(77, D, E, A, B, C);
+ T_60_79(78, C, D, E, A, B);
+ T_60_79(79, B, C, D, E, A);
+
+ digest[0] += A;
+ digest[1] += B;
+ digest[2] += C;
+ digest[3] += D;
+ digest[4] += E;
}
EXPORT_SYMBOL(sha_transform);
@@ -92,4 +197,3 @@ void sha_init(__u32 *buf)
buf[3] = 0x10325476;
buf[4] = 0xc3d2e1f0;
}
-
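[Editorial sketch, not part of the patch; my_sha1_one_block is hypothetical.] A caller sketch for the reworked transform: the workspace is now only 16 words instead of 80, and per the kernel-doc above the caller is still responsible for padding and for clearing the workspace when that matters for security.

#include <linux/cryptohash.h>
#include <linux/string.h>

static void my_sha1_one_block(__u32 digest[5], const char block[64])
{
	__u32 ws[16];			/* 16 words of workspace, was 80 */

	sha_init(digest);
	sha_transform(digest, block, ws);
	memset(ws, 0, sizeof(ws));	/* caller clears the workspace */
}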
diff --git a/mm/failslab.c b/mm/failslab.c
index 1ce58c201dc..0dd7b8fec71 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -34,23 +34,23 @@ __setup("failslab=", setup_failslab);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int __init failslab_debugfs_init(void)
{
+ struct dentry *dir;
mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
- int err;
- err = init_fault_attr_dentries(&failslab.attr, "failslab");
- if (err)
- return err;
+ dir = fault_create_debugfs_attr("failslab", NULL, &failslab.attr);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
- if (!debugfs_create_bool("ignore-gfp-wait", mode, failslab.attr.dir,
+ if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
&failslab.ignore_gfp_wait))
goto fail;
- if (!debugfs_create_bool("cache-filter", mode, failslab.attr.dir,
+ if (!debugfs_create_bool("cache-filter", mode, dir,
&failslab.cache_filter))
goto fail;
return 0;
fail:
- cleanup_fault_attr_dentries(&failslab.attr);
+ debugfs_remove_recursive(dir);
return -ENOMEM;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 867d40222ec..645a080ba4d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -33,7 +33,6 @@
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
-#include <linux/mm_inline.h> /* for page_is_file_cache() */
#include <linux/cleancache.h>
#include "internal.h"
@@ -462,6 +461,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
int error;
VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(PageSwapBacked(page));
error = mem_cgroup_cache_charge(page, current->mm,
gfp_mask & GFP_RECLAIM_MASK);
@@ -479,8 +479,6 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
if (likely(!error)) {
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
- if (PageSwapBacked(page))
- __inc_zone_page_state(page, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
} else {
page->mapping = NULL;
@@ -502,22 +500,9 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
{
int ret;
- /*
- * Splice_read and readahead add shmem/tmpfs pages into the page cache
- * before shmem_readpage has a chance to mark them as SwapBacked: they
- * need to go on the anon lru below, and mem_cgroup_cache_charge
- * (called in add_to_page_cache) needs to know where they're going too.
- */
- if (mapping_cap_swap_backed(mapping))
- SetPageSwapBacked(page);
-
ret = add_to_page_cache(page, mapping, offset, gfp_mask);
- if (ret == 0) {
- if (page_is_file_cache(page))
- lru_cache_add_file(page);
- else
- lru_cache_add_anon(page);
- }
+ if (ret == 0)
+ lru_cache_add_file(page);
return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
@@ -714,9 +699,16 @@ repeat:
page = radix_tree_deref_slot(pagep);
if (unlikely(!page))
goto out;
- if (radix_tree_deref_retry(page))
- goto repeat;
-
+ if (radix_tree_exception(page)) {
+ if (radix_tree_deref_retry(page))
+ goto repeat;
+ /*
+ * Otherwise, shmem/tmpfs must be storing a swap entry
+ * here as an exceptional entry: so return it without
+ * attempting to raise page count.
+ */
+ goto out;
+ }
if (!page_cache_get_speculative(page))
goto repeat;
@@ -753,7 +745,7 @@ struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
repeat:
page = find_get_page(mapping, offset);
- if (page) {
+ if (page && !radix_tree_exception(page)) {
lock_page(page);
/* Has the page been truncated? */
if (unlikely(page->mapping != mapping)) {
@@ -840,7 +832,7 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
rcu_read_lock();
restart:
nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
- (void ***)pages, start, nr_pages);
+ (void ***)pages, NULL, start, nr_pages);
ret = 0;
for (i = 0; i < nr_found; i++) {
struct page *page;
@@ -849,13 +841,22 @@ repeat:
if (unlikely(!page))
continue;
- /*
- * This can only trigger when the entry at index 0 moves out
- * of or back to the root: none yet gotten, safe to restart.
- */
- if (radix_tree_deref_retry(page)) {
- WARN_ON(start | i);
- goto restart;
+ if (radix_tree_exception(page)) {
+ if (radix_tree_deref_retry(page)) {
+ /*
+ * Transient condition which can only trigger
+ * when entry at index 0 moves out of or back
+ * to root: none yet gotten, safe to restart.
+ */
+ WARN_ON(start | i);
+ goto restart;
+ }
+ /*
+ * Otherwise, shmem/tmpfs must be storing a swap entry
+ * here as an exceptional entry: so skip over it -
+ * we only reach this from invalidate_mapping_pages().
+ */
+ continue;
}
if (!page_cache_get_speculative(page))
@@ -903,7 +904,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
rcu_read_lock();
restart:
nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
- (void ***)pages, index, nr_pages);
+ (void ***)pages, NULL, index, nr_pages);
ret = 0;
for (i = 0; i < nr_found; i++) {
struct page *page;
@@ -912,12 +913,22 @@ repeat:
if (unlikely(!page))
continue;
- /*
- * This can only trigger when the entry at index 0 moves out
- * of or back to the root: none yet gotten, safe to restart.
- */
- if (radix_tree_deref_retry(page))
- goto restart;
+ if (radix_tree_exception(page)) {
+ if (radix_tree_deref_retry(page)) {
+ /*
+ * Transient condition which can only trigger
+ * when entry at index 0 moves out of or back
+ * to root: none yet gotten, safe to restart.
+ */
+ goto restart;
+ }
+ /*
+ * Otherwise, shmem/tmpfs must be storing a swap entry
+ * here as an exceptional entry: so stop looking for
+ * contiguous pages.
+ */
+ break;
+ }
if (!page_cache_get_speculative(page))
goto repeat;
@@ -977,12 +988,21 @@ repeat:
if (unlikely(!page))
continue;
- /*
- * This can only trigger when the entry at index 0 moves out
- * of or back to the root: none yet gotten, safe to restart.
- */
- if (radix_tree_deref_retry(page))
- goto restart;
+ if (radix_tree_exception(page)) {
+ if (radix_tree_deref_retry(page)) {
+ /*
+ * Transient condition which can only trigger
+ * when entry at index 0 moves out of or back
+ * to root: none yet gotten, safe to restart.
+ */
+ goto restart;
+ }
+ /*
+ * This function is never used on a shmem/tmpfs
+ * mapping, so a swap entry won't be found here.
+ */
+ BUG();
+ }
if (!page_cache_get_speculative(page))
goto repeat;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5f84d2351dd..f4ec4e7ca4c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -35,7 +35,6 @@
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
-#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
@@ -2873,30 +2872,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
return 0;
if (PageCompound(page))
return 0;
- /*
- * Corner case handling. This is called from add_to_page_cache()
- * in usual. But some FS (shmem) precharges this page before calling it
- * and call add_to_page_cache() with GFP_NOWAIT.
- *
- * For GFP_NOWAIT case, the page may be pre-charged before calling
- * add_to_page_cache(). (See shmem.c) check it here and avoid to call
- * charge twice. (It works but has to pay a bit larger cost.)
- * And when the page is SwapCache, it should take swap information
- * into account. This is under lock_page() now.
- */
- if (!(gfp_mask & __GFP_WAIT)) {
- struct page_cgroup *pc;
-
- pc = lookup_page_cgroup(page);
- if (!pc)
- return 0;
- lock_page_cgroup(pc);
- if (PageCgroupUsed(pc)) {
- unlock_page_cgroup(pc);
- return 0;
- }
- unlock_page_cgroup(pc);
- }
if (unlikely(!mm))
mm = &init_mm;
@@ -3486,31 +3461,6 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
cgroup_release_and_wakeup_rmdir(&mem->css);
}
-/*
- * A call to try to shrink memory usage on charge failure at shmem's swapin.
- * Calling hierarchical_reclaim is not enough because we should update
- * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
- * Moreover considering hierarchy, we should reclaim from the mem_over_limit,
- * not from the memcg which this page would be charged to.
- * try_charge_swapin does all of these works properly.
- */
-int mem_cgroup_shmem_charge_fallback(struct page *page,
- struct mm_struct *mm,
- gfp_t gfp_mask)
-{
- struct mem_cgroup *mem;
- int ret;
-
- if (mem_cgroup_disabled())
- return 0;
-
- ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
- if (!ret)
- mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
-
- return ret;
-}
-
#ifdef CONFIG_DEBUG_VM
static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
{
@@ -5330,15 +5280,17 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
pgoff = pte_to_pgoff(ptent);
/* page is moved even if it's not RSS of this task(page-faulted). */
- if (!mapping_cap_swap_backed(mapping)) { /* normal file */
- page = find_get_page(mapping, pgoff);
- } else { /* shmem/tmpfs file. we should take account of swap too. */
- swp_entry_t ent;
- mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
+ page = find_get_page(mapping, pgoff);
+
+#ifdef CONFIG_SWAP
+ /* shmem/tmpfs may report page out on swap: account for that too. */
+ if (radix_tree_exceptional_entry(page)) {
+ swp_entry_t swap = radix_to_swp_entry(page);
if (do_swap_account)
- entry->val = ent.val;
+ *entry = swap;
+ page = find_get_page(&swapper_space, swap.val);
}
-
+#endif
return page;
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 740c4f52059..2b43ba051ac 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -53,6 +53,7 @@
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
+#include <linux/kfifo.h>
#include "internal.h"
int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -1178,6 +1179,97 @@ void memory_failure(unsigned long pfn, int trapno)
__memory_failure(pfn, trapno, 0);
}
+#define MEMORY_FAILURE_FIFO_ORDER 4
+#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
+
+struct memory_failure_entry {
+ unsigned long pfn;
+ int trapno;
+ int flags;
+};
+
+struct memory_failure_cpu {
+ DECLARE_KFIFO(fifo, struct memory_failure_entry,
+ MEMORY_FAILURE_FIFO_SIZE);
+ spinlock_t lock;
+ struct work_struct work;
+};
+
+static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
+
+/**
+ * memory_failure_queue - Schedule handling memory failure of a page.
+ * @pfn: Page Number of the corrupted page
+ * @trapno: Trap number reported in the signal to user space.
+ * @flags: Flags for memory failure handling
+ *
+ * This function is called by the low level hardware error handler
+ * when it detects hardware memory corruption of a page. It schedules
+ * the recovering of error page, including dropping pages, killing
+ * processes etc.
+ *
+ * The function is primarily of use for corruptions that
+ * happen outside the current execution context (e.g. when
+ * detected by a background scrubber).
+ *
+ * Can run in IRQ context.
+ */
+void memory_failure_queue(unsigned long pfn, int trapno, int flags)
+{
+ struct memory_failure_cpu *mf_cpu;
+ unsigned long proc_flags;
+ struct memory_failure_entry entry = {
+ .pfn = pfn,
+ .trapno = trapno,
+ .flags = flags,
+ };
+
+ mf_cpu = &get_cpu_var(memory_failure_cpu);
+ spin_lock_irqsave(&mf_cpu->lock, proc_flags);
+ if (kfifo_put(&mf_cpu->fifo, &entry))
+ schedule_work_on(smp_processor_id(), &mf_cpu->work);
+ else
+ pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
+ pfn);
+ spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
+ put_cpu_var(memory_failure_cpu);
+}
+EXPORT_SYMBOL_GPL(memory_failure_queue);
+
+static void memory_failure_work_func(struct work_struct *work)
+{
+ struct memory_failure_cpu *mf_cpu;
+ struct memory_failure_entry entry = { 0, };
+ unsigned long proc_flags;
+ int gotten;
+
+ mf_cpu = &__get_cpu_var(memory_failure_cpu);
+ for (;;) {
+ spin_lock_irqsave(&mf_cpu->lock, proc_flags);
+ gotten = kfifo_get(&mf_cpu->fifo, &entry);
+ spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
+ if (!gotten)
+ break;
+ __memory_failure(entry.pfn, entry.trapno, entry.flags);
+ }
+}
+
+static int __init memory_failure_init(void)
+{
+ struct memory_failure_cpu *mf_cpu;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ mf_cpu = &per_cpu(memory_failure_cpu, cpu);
+ spin_lock_init(&mf_cpu->lock);
+ INIT_KFIFO(mf_cpu->fifo);
+ INIT_WORK(&mf_cpu->work, memory_failure_work_func);
+ }
+
+ return 0;
+}
+core_initcall(memory_failure_init);
+
/**
* unpoison_memory - Unpoison a previously poisoned page
* @pfn: Page number of the to be unpoisoned page
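[Editorial sketch, not part of the patch; my_report_bad_page is hypothetical, and the prototype is assumed to be exported next to memory_failure() in <linux/mm.h>.] A hardware error handler running in interrupt or MCE context only queues the pfn here; the per-CPU work item later runs __memory_failure() in process context.

#include <linux/mm.h>

static void my_report_bad_page(unsigned long pfn, int trapno)
{
	/* Safe in IRQ context: just queues the entry and schedules work. */
	memory_failure_queue(pfn, trapno, 0);	/* 0 = default recovery flags */
}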
diff --git a/mm/mincore.c b/mm/mincore.c
index a4e6b9d75c7..636a86876ff 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -69,12 +69,15 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
* file will not get a swp_entry_t in its pte, but rather it is like
* any other file mapping (ie. marked !present and faulted in with
* tmpfs's .fault). So swapped out tmpfs mappings are tested here.
- *
- * However when tmpfs moves the page from pagecache and into swapcache,
- * it is still in core, but the find_get_page below won't find it.
- * No big deal, but make a note of it.
*/
page = find_get_page(mapping, pgoff);
+#ifdef CONFIG_SWAP
+ /* shmem/tmpfs may return swap: account for swapcache page too. */
+ if (radix_tree_exceptional_entry(page)) {
+ swp_entry_t swap = radix_to_swp_entry(page);
+ page = find_get_page(&swapper_space, swap.val);
+ }
+#endif
if (page) {
present = PageUptodate(page);
page_cache_release(page);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index eafff89b3dd..626303b52f3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -303,7 +303,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
do_each_thread(g, p) {
unsigned int points;
- if (!p->mm)
+ if (p->exit_state)
continue;
if (oom_unkillable_task(p, mem, nodemask))
continue;
@@ -319,6 +319,8 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
*/
if (test_tsk_thread_flag(p, TIF_MEMDIE))
return ERR_PTR(-1UL);
+ if (!p->mm)
+ continue;
if (p->flags & PF_EXITING) {
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1dbcf8888f1..6e8ecb6e021 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1409,14 +1409,11 @@ static int __init fail_page_alloc_debugfs(void)
{
mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
- int err;
- err = init_fault_attr_dentries(&fail_page_alloc.attr,
- "fail_page_alloc");
- if (err)
- return err;
-
- dir = fail_page_alloc.attr.dir;
+ dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
+ &fail_page_alloc.attr);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
&fail_page_alloc.ignore_gfp_wait))
@@ -1430,7 +1427,7 @@ static int __init fail_page_alloc_debugfs(void)
return 0;
fail:
- cleanup_fault_attr_dentries(&fail_page_alloc.attr);
+ debugfs_remove_recursive(dir);
return -ENOMEM;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 5cc21f8b4cd..32f6763f16f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -6,7 +6,8 @@
* 2000-2001 Christoph Rohland
* 2000-2001 SAP AG
* 2002 Red Hat Inc.
- * Copyright (C) 2002-2005 Hugh Dickins.
+ * Copyright (C) 2002-2011 Hugh Dickins.
+ * Copyright (C) 2011 Google Inc.
* Copyright (C) 2002-2005 VERITAS Software Corporation.
* Copyright (C) 2004 Andi Kleen, SuSE Labs
*
@@ -28,7 +29,6 @@
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/percpu_counter.h>
#include <linux/swap.h>
static struct vfsmount *shm_mnt;
@@ -51,6 +51,8 @@ static struct vfsmount *shm_mnt;
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
+#include <linux/pagevec.h>
+#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
@@ -63,43 +65,17 @@ static struct vfsmount *shm_mnt;
#include <linux/magic.h>
#include <asm/uaccess.h>
-#include <asm/div64.h>
#include <asm/pgtable.h>
-/*
- * The maximum size of a shmem/tmpfs file is limited by the maximum size of
- * its triple-indirect swap vector - see illustration at shmem_swp_entry().
- *
- * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
- * but one eighth of that on a 64-bit kernel. With 8kB page size, maximum
- * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
- * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
- *
- * We use / and * instead of shifts in the definitions below, so that the swap
- * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
- */
-#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
-#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-
-#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
-#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
-
-#define SHMEM_MAX_BYTES min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
-#define SHMEM_MAX_INDEX ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
-
#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
-/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
-#define SHMEM_PAGEIN VM_READ
-#define SHMEM_TRUNCATE VM_WRITE
-
-/* Definition to limit shmem_truncate's steps between cond_rescheds */
-#define LATENCY_LIMIT 64
-
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
+/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
+#define SHORT_SYMLINK_LEN 128
+
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
char *name; /* xattr name */
@@ -107,7 +83,7 @@ struct shmem_xattr {
char value[0];
};
-/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
+/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
@@ -137,56 +113,6 @@ static inline int shmem_getpage(struct inode *inode, pgoff_t index,
mapping_gfp_mask(inode->i_mapping), fault_type);
}
-static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
-{
- /*
- * The above definition of ENTRIES_PER_PAGE, and the use of
- * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
- * might be reconsidered if it ever diverges from PAGE_SIZE.
- *
- * Mobility flags are masked out as swap vectors cannot move
- */
- return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
- PAGE_CACHE_SHIFT-PAGE_SHIFT);
-}
-
-static inline void shmem_dir_free(struct page *page)
-{
- __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
-}
-
-static struct page **shmem_dir_map(struct page *page)
-{
- return (struct page **)kmap_atomic(page, KM_USER0);
-}
-
-static inline void shmem_dir_unmap(struct page **dir)
-{
- kunmap_atomic(dir, KM_USER0);
-}
-
-static swp_entry_t *shmem_swp_map(struct page *page)
-{
- return (swp_entry_t *)kmap_atomic(page, KM_USER1);
-}
-
-static inline void shmem_swp_balance_unmap(void)
-{
- /*
- * When passing a pointer to an i_direct entry, to code which
- * also handles indirect entries and so will shmem_swp_unmap,
- * we must arrange for the preempt count to remain in balance.
- * What kmap_atomic of a lowmem page does depends on config
- * and architecture, so pretend to kmap_atomic some lowmem page.
- */
- (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
-}
-
-static inline void shmem_swp_unmap(swp_entry_t *entry)
-{
- kunmap_atomic(entry, KM_USER1);
-}
-
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
return sb->s_fs_info;
@@ -244,15 +170,6 @@ static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
-static void shmem_free_blocks(struct inode *inode, long pages)
-{
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- if (sbinfo->max_blocks) {
- percpu_counter_add(&sbinfo->used_blocks, -pages);
- inode->i_blocks -= pages*BLOCKS_PER_PAGE;
- }
-}
-
static int shmem_reserve_inode(struct super_block *sb)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
@@ -279,7 +196,7 @@ static void shmem_free_inode(struct super_block *sb)
}
/**
- * shmem_recalc_inode - recalculate the size of an inode
+ * shmem_recalc_inode - recalculate the block usage of an inode
* @inode: inode to recalc
*
* We have to calculate the free blocks since the mm can drop
@@ -297,474 +214,297 @@ static void shmem_recalc_inode(struct inode *inode)
freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
if (freed > 0) {
+ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ if (sbinfo->max_blocks)
+ percpu_counter_add(&sbinfo->used_blocks, -freed);
info->alloced -= freed;
+ inode->i_blocks -= freed * BLOCKS_PER_PAGE;
shmem_unacct_blocks(info->flags, freed);
- shmem_free_blocks(inode, freed);
}
}
-/**
- * shmem_swp_entry - find the swap vector position in the info structure
- * @info: info structure for the inode
- * @index: index of the page to find
- * @page: optional page to add to the structure. Has to be preset to
- * all zeros
- *
- * If there is no space allocated yet it will return NULL when
- * page is NULL, else it will use the page for the needed block,
- * setting it to NULL on return to indicate that it has been used.
- *
- * The swap vector is organized the following way:
- *
- * There are SHMEM_NR_DIRECT entries directly stored in the
- * shmem_inode_info structure. So small files do not need an addional
- * allocation.
- *
- * For pages with index > SHMEM_NR_DIRECT there is the pointer
- * i_indirect which points to a page which holds in the first half
- * doubly indirect blocks, in the second half triple indirect blocks:
- *
- * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
- * following layout (for SHMEM_NR_DIRECT == 16):
- *
- * i_indirect -> dir --> 16-19
- * | +-> 20-23
- * |
- * +-->dir2 --> 24-27
- * | +-> 28-31
- * | +-> 32-35
- * | +-> 36-39
- * |
- * +-->dir3 --> 40-43
- * +-> 44-47
- * +-> 48-51
- * +-> 52-55
+/*
+ * Replace item expected in radix tree by a new item, while holding tree lock.
*/
-static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
-{
- unsigned long offset;
- struct page **dir;
- struct page *subdir;
-
- if (index < SHMEM_NR_DIRECT) {
- shmem_swp_balance_unmap();
- return info->i_direct+index;
- }
- if (!info->i_indirect) {
- if (page) {
- info->i_indirect = *page;
- *page = NULL;
- }
- return NULL; /* need another page */
- }
-
- index -= SHMEM_NR_DIRECT;
- offset = index % ENTRIES_PER_PAGE;
- index /= ENTRIES_PER_PAGE;
- dir = shmem_dir_map(info->i_indirect);
-
- if (index >= ENTRIES_PER_PAGE/2) {
- index -= ENTRIES_PER_PAGE/2;
- dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
- index %= ENTRIES_PER_PAGE;
- subdir = *dir;
- if (!subdir) {
- if (page) {
- *dir = *page;
- *page = NULL;
- }
- shmem_dir_unmap(dir);
- return NULL; /* need another page */
- }
- shmem_dir_unmap(dir);
- dir = shmem_dir_map(subdir);
- }
+static int shmem_radix_tree_replace(struct address_space *mapping,
+ pgoff_t index, void *expected, void *replacement)
+{
+ void **pslot;
+ void *item = NULL;
+
+ VM_BUG_ON(!expected);
+ pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
+ if (pslot)
+ item = radix_tree_deref_slot_protected(pslot,
+ &mapping->tree_lock);
+ if (item != expected)
+ return -ENOENT;
+ if (replacement)
+ radix_tree_replace_slot(pslot, replacement);
+ else
+ radix_tree_delete(&mapping->page_tree, index);
+ return 0;
+}
- dir += index;
- subdir = *dir;
- if (!subdir) {
- if (!page || !(subdir = *page)) {
- shmem_dir_unmap(dir);
- return NULL; /* need a page */
+/*
+ * Like add_to_page_cache_locked, but error if expected item has gone.
+ */
+static int shmem_add_to_page_cache(struct page *page,
+ struct address_space *mapping,
+ pgoff_t index, gfp_t gfp, void *expected)
+{
+ int error = 0;
+
+ VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(!PageSwapBacked(page));
+
+ if (!expected)
+ error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+ if (!error) {
+ page_cache_get(page);
+ page->mapping = mapping;
+ page->index = index;
+
+ spin_lock_irq(&mapping->tree_lock);
+ if (!expected)
+ error = radix_tree_insert(&mapping->page_tree,
+ index, page);
+ else
+ error = shmem_radix_tree_replace(mapping, index,
+ expected, page);
+ if (!error) {
+ mapping->nrpages++;
+ __inc_zone_page_state(page, NR_FILE_PAGES);
+ __inc_zone_page_state(page, NR_SHMEM);
+ spin_unlock_irq(&mapping->tree_lock);
+ } else {
+ page->mapping = NULL;
+ spin_unlock_irq(&mapping->tree_lock);
+ page_cache_release(page);
}
- *dir = subdir;
- *page = NULL;
+ if (!expected)
+ radix_tree_preload_end();
}
- shmem_dir_unmap(dir);
- return shmem_swp_map(subdir) + offset;
+ if (error)
+ mem_cgroup_uncharge_cache_page(page);
+ return error;
}
-static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
+/*
+ * Like delete_from_page_cache, but substitutes swap for page.
+ */
+static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
- long incdec = value? 1: -1;
+ struct address_space *mapping = page->mapping;
+ int error;
- entry->val = value;
- info->swapped += incdec;
- if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
- struct page *page = kmap_atomic_to_page(entry);
- set_page_private(page, page_private(page) + incdec);
- }
+ spin_lock_irq(&mapping->tree_lock);
+ error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
+ page->mapping = NULL;
+ mapping->nrpages--;
+ __dec_zone_page_state(page, NR_FILE_PAGES);
+ __dec_zone_page_state(page, NR_SHMEM);
+ spin_unlock_irq(&mapping->tree_lock);
+ page_cache_release(page);
+ BUG_ON(error);
}
-/**
- * shmem_swp_alloc - get the position of the swap entry for the page.
- * @info: info structure for the inode
- * @index: index of the page to find
- * @sgp: check and recheck i_size? skip allocation?
- * @gfp: gfp mask to use for any page allocation
- *
- * If the entry does not exist, allocate it.
+/*
+ * Like find_get_pages, but collecting swap entries as well as pages.
*/
-static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info,
- unsigned long index, enum sgp_type sgp, gfp_t gfp)
-{
- struct inode *inode = &info->vfs_inode;
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- struct page *page = NULL;
- swp_entry_t *entry;
-
- if (sgp != SGP_WRITE &&
- ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
- return ERR_PTR(-EINVAL);
-
- while (!(entry = shmem_swp_entry(info, index, &page))) {
- if (sgp == SGP_READ)
- return shmem_swp_map(ZERO_PAGE(0));
- /*
- * Test used_blocks against 1 less max_blocks, since we have 1 data
- * page (and perhaps indirect index pages) yet to allocate:
- * a waste to allocate index if we cannot allocate data.
- */
- if (sbinfo->max_blocks) {
- if (percpu_counter_compare(&sbinfo->used_blocks,
- sbinfo->max_blocks - 1) >= 0)
- return ERR_PTR(-ENOSPC);
- percpu_counter_inc(&sbinfo->used_blocks);
- inode->i_blocks += BLOCKS_PER_PAGE;
+static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
+ pgoff_t start, unsigned int nr_pages,
+ struct page **pages, pgoff_t *indices)
+{
+ unsigned int i;
+ unsigned int ret;
+ unsigned int nr_found;
+
+ rcu_read_lock();
+restart:
+ nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
+ (void ***)pages, indices, start, nr_pages);
+ ret = 0;
+ for (i = 0; i < nr_found; i++) {
+ struct page *page;
+repeat:
+ page = radix_tree_deref_slot((void **)pages[i]);
+ if (unlikely(!page))
+ continue;
+ if (radix_tree_exception(page)) {
+ if (radix_tree_deref_retry(page))
+ goto restart;
+ /*
+ * Otherwise, we must be storing a swap entry
+ * here as an exceptional entry: so return it
+ * without attempting to raise page count.
+ */
+ goto export;
}
+ if (!page_cache_get_speculative(page))
+ goto repeat;
- spin_unlock(&info->lock);
- page = shmem_dir_alloc(gfp);
- spin_lock(&info->lock);
-
- if (!page) {
- shmem_free_blocks(inode, 1);
- return ERR_PTR(-ENOMEM);
- }
- if (sgp != SGP_WRITE &&
- ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
- entry = ERR_PTR(-EINVAL);
- break;
+ /* Has the page moved? */
+ if (unlikely(page != *((void **)pages[i]))) {
+ page_cache_release(page);
+ goto repeat;
}
- if (info->next_index <= index)
- info->next_index = index + 1;
- }
- if (page) {
- /* another task gave its page, or truncated the file */
- shmem_free_blocks(inode, 1);
- shmem_dir_free(page);
- }
- if (info->next_index <= index && !IS_ERR(entry))
- info->next_index = index + 1;
- return entry;
+export:
+ indices[ret] = indices[i];
+ pages[ret] = page;
+ ret++;
+ }
+ if (unlikely(!ret && nr_found))
+ goto restart;
+ rcu_read_unlock();
+ return ret;
}
-/**
- * shmem_free_swp - free some swap entries in a directory
- * @dir: pointer to the directory
- * @edir: pointer after last entry of the directory
- * @punch_lock: pointer to spinlock when needed for the holepunch case
+/*
+ * Remove swap entry from radix tree, free the swap and its page cache.
*/
-static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
- spinlock_t *punch_lock)
-{
- spinlock_t *punch_unlock = NULL;
- swp_entry_t *ptr;
- int freed = 0;
-
- for (ptr = dir; ptr < edir; ptr++) {
- if (ptr->val) {
- if (unlikely(punch_lock)) {
- punch_unlock = punch_lock;
- punch_lock = NULL;
- spin_lock(punch_unlock);
- if (!ptr->val)
- continue;
- }
- free_swap_and_cache(*ptr);
- *ptr = (swp_entry_t){0};
- freed++;
- }
- }
- if (punch_unlock)
- spin_unlock(punch_unlock);
- return freed;
-}
-
-static int shmem_map_and_free_swp(struct page *subdir, int offset,
- int limit, struct page ***dir, spinlock_t *punch_lock)
-{
- swp_entry_t *ptr;
- int freed = 0;
-
- ptr = shmem_swp_map(subdir);
- for (; offset < limit; offset += LATENCY_LIMIT) {
- int size = limit - offset;
- if (size > LATENCY_LIMIT)
- size = LATENCY_LIMIT;
- freed += shmem_free_swp(ptr+offset, ptr+offset+size,
- punch_lock);
- if (need_resched()) {
- shmem_swp_unmap(ptr);
- if (*dir) {
- shmem_dir_unmap(*dir);
- *dir = NULL;
- }
- cond_resched();
- ptr = shmem_swp_map(subdir);
- }
- }
- shmem_swp_unmap(ptr);
- return freed;
+static int shmem_free_swap(struct address_space *mapping,
+ pgoff_t index, void *radswap)
+{
+ int error;
+
+ spin_lock_irq(&mapping->tree_lock);
+ error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
+ spin_unlock_irq(&mapping->tree_lock);
+ if (!error)
+ free_swap_and_cache(radix_to_swp_entry(radswap));
+ return error;
}
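
The new helpers above rely on swap entries being stored directly in the page-cache radix tree as "exceptional" entries, i.e. tagged non-pointer values sharing slots with struct page pointers; radix_tree_exceptional_entry(), swp_to_radix_entry() and radix_to_swp_entry() test and convert them. The standalone sketch below only illustrates the tag-bit idea; the shift and tag constants are assumptions for the demo, not the kernel's definitive encoding.

/* Illustrative sketch only: shows the tag-bit idea behind storing swap
 * entries as "exceptional" radix-tree slots alongside page pointers.
 * DEMO_* values are assumptions for this demo, not the real encoding
 * used by swp_to_radix_entry()/radix_to_swp_entry(). */
#include <assert.h>
#include <stdio.h>

#define DEMO_EXCEPTIONAL_TAG   2UL  /* assumed low tag bit */
#define DEMO_EXCEPTIONAL_SHIFT 2    /* assumed shift past the tag bits */

static void *demo_swp_to_slot(unsigned long swp_val)
{
	/* Pointers to struct page are word aligned, so their two low bits
	 * are zero; setting a low bit marks the slot as "not a page". */
	return (void *)((swp_val << DEMO_EXCEPTIONAL_SHIFT) | DEMO_EXCEPTIONAL_TAG);
}

static int demo_slot_is_exceptional(void *slot)
{
	return ((unsigned long)slot & DEMO_EXCEPTIONAL_TAG) != 0;
}

static unsigned long demo_slot_to_swp(void *slot)
{
	return (unsigned long)slot >> DEMO_EXCEPTIONAL_SHIFT;
}

int main(void)
{
	unsigned long swp_val = 0x1234;
	void *slot = demo_swp_to_slot(swp_val);
	int page_backing;                 /* stand-in for a struct page */
	void *page_slot = &page_backing;  /* aligned, low bits are zero */

	assert(demo_slot_is_exceptional(slot));
	assert(!demo_slot_is_exceptional(page_slot));
	assert(demo_slot_to_swp(slot) == swp_val);
	printf("swap value 0x%lx round-trips through slot %p\n",
	       demo_slot_to_swp(slot), slot);
	return 0;
}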
-static void shmem_free_pages(struct list_head *next)
+/*
+ * Pagevec may contain swap entries, so shuffle up pages before releasing.
+ */
+static void shmem_pagevec_release(struct pagevec *pvec)
{
- struct page *page;
- int freed = 0;
-
- do {
- page = container_of(next, struct page, lru);
- next = next->next;
- shmem_dir_free(page);
- freed++;
- if (freed >= LATENCY_LIMIT) {
- cond_resched();
- freed = 0;
- }
- } while (next);
+ int i, j;
+
+ for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
+ struct page *page = pvec->pages[i];
+ if (!radix_tree_exceptional_entry(page))
+ pvec->pages[j++] = page;
+ }
+ pvec->nr = j;
+ pagevec_release(pvec);
}
-void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+/*
+ * Remove range of pages and swap entries from radix tree, and free them.
+ */
+void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
+ struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
- unsigned long idx;
- unsigned long size;
- unsigned long limit;
- unsigned long stage;
- unsigned long diroff;
- struct page **dir;
- struct page *topdir;
- struct page *middir;
- struct page *subdir;
- swp_entry_t *ptr;
- LIST_HEAD(pages_to_free);
- long nr_pages_to_free = 0;
+ pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
+ pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
+ struct pagevec pvec;
+ pgoff_t indices[PAGEVEC_SIZE];
long nr_swaps_freed = 0;
- int offset;
- int freed;
- int punch_hole;
- spinlock_t *needs_lock;
- spinlock_t *punch_lock;
- unsigned long upper_limit;
+ pgoff_t index;
+ int i;
- truncate_inode_pages_range(inode->i_mapping, start, end);
+ BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
- inode->i_ctime = inode->i_mtime = CURRENT_TIME;
- idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (idx >= info->next_index)
- return;
+ pagevec_init(&pvec, 0);
+ index = start;
+ while (index <= end) {
+ pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+ pvec.pages, indices);
+ if (!pvec.nr)
+ break;
+ mem_cgroup_uncharge_start();
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ struct page *page = pvec.pages[i];
- spin_lock(&info->lock);
- info->flags |= SHMEM_TRUNCATE;
- if (likely(end == (loff_t) -1)) {
- limit = info->next_index;
- upper_limit = SHMEM_MAX_INDEX;
- info->next_index = idx;
- needs_lock = NULL;
- punch_hole = 0;
- } else {
- if (end + 1 >= inode->i_size) { /* we may free a little more */
- limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
- upper_limit = SHMEM_MAX_INDEX;
- } else {
- limit = (end + 1) >> PAGE_CACHE_SHIFT;
- upper_limit = limit;
- }
- needs_lock = &info->lock;
- punch_hole = 1;
- }
+ index = indices[i];
+ if (index > end)
+ break;
+
+ if (radix_tree_exceptional_entry(page)) {
+ nr_swaps_freed += !shmem_free_swap(mapping,
+ index, page);
+ continue;
+ }
- topdir = info->i_indirect;
- if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
- info->i_indirect = NULL;
- nr_pages_to_free++;
- list_add(&topdir->lru, &pages_to_free);
+ if (!trylock_page(page))
+ continue;
+ if (page->mapping == mapping) {
+ VM_BUG_ON(PageWriteback(page));
+ truncate_inode_page(mapping, page);
+ }
+ unlock_page(page);
+ }
+ shmem_pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
+ cond_resched();
+ index++;
}
- spin_unlock(&info->lock);
- if (info->swapped && idx < SHMEM_NR_DIRECT) {
- ptr = info->i_direct;
- size = limit;
- if (size > SHMEM_NR_DIRECT)
- size = SHMEM_NR_DIRECT;
- nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
+ if (partial) {
+ struct page *page = NULL;
+ shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
+ if (page) {
+ zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+ set_page_dirty(page);
+ unlock_page(page);
+ page_cache_release(page);
+ }
}
- /*
- * If there are no indirect blocks or we are punching a hole
- * below indirect blocks, nothing to be done.
- */
- if (!topdir || limit <= SHMEM_NR_DIRECT)
- goto done2;
+ index = start;
+ for ( ; ; ) {
+ cond_resched();
+ pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+ pvec.pages, indices);
+ if (!pvec.nr) {
+ if (index == start)
+ break;
+ index = start;
+ continue;
+ }
+ if (index == start && indices[0] > end) {
+ shmem_pagevec_release(&pvec);
+ break;
+ }
+ mem_cgroup_uncharge_start();
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ struct page *page = pvec.pages[i];
- /*
- * The truncation case has already dropped info->lock, and we're safe
- * because i_size and next_index have already been lowered, preventing
- * access beyond. But in the punch_hole case, we still need to take
- * the lock when updating the swap directory, because there might be
- * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
- * shmem_writepage. However, whenever we find we can remove a whole
- * directory page (not at the misaligned start or end of the range),
- * we first NULLify its pointer in the level above, and then have no
- * need to take the lock when updating its contents: needs_lock and
- * punch_lock (either pointing to info->lock or NULL) manage this.
- */
+ index = indices[i];
+ if (index > end)
+ break;
- upper_limit -= SHMEM_NR_DIRECT;
- limit -= SHMEM_NR_DIRECT;
- idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
- offset = idx % ENTRIES_PER_PAGE;
- idx -= offset;
-
- dir = shmem_dir_map(topdir);
- stage = ENTRIES_PER_PAGEPAGE/2;
- if (idx < ENTRIES_PER_PAGEPAGE/2) {
- middir = topdir;
- diroff = idx/ENTRIES_PER_PAGE;
- } else {
- dir += ENTRIES_PER_PAGE/2;
- dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
- while (stage <= idx)
- stage += ENTRIES_PER_PAGEPAGE;
- middir = *dir;
- if (*dir) {
- diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
- ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
- if (!diroff && !offset && upper_limit >= stage) {
- if (needs_lock) {
- spin_lock(needs_lock);
- *dir = NULL;
- spin_unlock(needs_lock);
- needs_lock = NULL;
- } else
- *dir = NULL;
- nr_pages_to_free++;
- list_add(&middir->lru, &pages_to_free);
+ if (radix_tree_exceptional_entry(page)) {
+ nr_swaps_freed += !shmem_free_swap(mapping,
+ index, page);
+ continue;
}
- shmem_dir_unmap(dir);
- dir = shmem_dir_map(middir);
- } else {
- diroff = 0;
- offset = 0;
- idx = stage;
- }
- }
- for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
- if (unlikely(idx == stage)) {
- shmem_dir_unmap(dir);
- dir = shmem_dir_map(topdir) +
- ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
- while (!*dir) {
- dir++;
- idx += ENTRIES_PER_PAGEPAGE;
- if (idx >= limit)
- goto done1;
- }
- stage = idx + ENTRIES_PER_PAGEPAGE;
- middir = *dir;
- if (punch_hole)
- needs_lock = &info->lock;
- if (upper_limit >= stage) {
- if (needs_lock) {
- spin_lock(needs_lock);
- *dir = NULL;
- spin_unlock(needs_lock);
- needs_lock = NULL;
- } else
- *dir = NULL;
- nr_pages_to_free++;
- list_add(&middir->lru, &pages_to_free);
+ lock_page(page);
+ if (page->mapping == mapping) {
+ VM_BUG_ON(PageWriteback(page));
+ truncate_inode_page(mapping, page);
}
- shmem_dir_unmap(dir);
- cond_resched();
- dir = shmem_dir_map(middir);
- diroff = 0;
- }
- punch_lock = needs_lock;
- subdir = dir[diroff];
- if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
- if (needs_lock) {
- spin_lock(needs_lock);
- dir[diroff] = NULL;
- spin_unlock(needs_lock);
- punch_lock = NULL;
- } else
- dir[diroff] = NULL;
- nr_pages_to_free++;
- list_add(&subdir->lru, &pages_to_free);
- }
- if (subdir && page_private(subdir) /* has swap entries */) {
- size = limit - idx;
- if (size > ENTRIES_PER_PAGE)
- size = ENTRIES_PER_PAGE;
- freed = shmem_map_and_free_swp(subdir,
- offset, size, &dir, punch_lock);
- if (!dir)
- dir = shmem_dir_map(middir);
- nr_swaps_freed += freed;
- if (offset || punch_lock) {
- spin_lock(&info->lock);
- set_page_private(subdir,
- page_private(subdir) - freed);
- spin_unlock(&info->lock);
- } else
- BUG_ON(page_private(subdir) != freed);
+ unlock_page(page);
}
- offset = 0;
- }
-done1:
- shmem_dir_unmap(dir);
-done2:
- if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
- /*
- * Call truncate_inode_pages again: racing shmem_unuse_inode
- * may have swizzled a page in from swap since
- * truncate_pagecache or generic_delete_inode did it, before we
- * lowered next_index. Also, though shmem_getpage checks
- * i_size before adding to cache, no recheck after: so fix the
- * narrow window there too.
- */
- truncate_inode_pages_range(inode->i_mapping, start, end);
+ shmem_pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
+ index++;
}
spin_lock(&info->lock);
- info->flags &= ~SHMEM_TRUNCATE;
info->swapped -= nr_swaps_freed;
- if (nr_pages_to_free)
- shmem_free_blocks(inode, nr_pages_to_free);
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
- /*
- * Empty swap vector directory pages to be freed?
- */
- if (!list_empty(&pages_to_free)) {
- pages_to_free.prev->next = NULL;
- shmem_free_pages(pages_to_free.next);
- }
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
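
The rewritten shmem_truncate_range() converts the byte range [lstart, lend] into page indices: start rounds lstart up to the next page boundary, partial is the sub-page remainder that must be zeroed rather than freed, and end is the last page index covered. A minimal userspace sketch of that arithmetic, assuming a 4096-byte page size for the demo:

/* Standalone sketch of the byte-range -> page-index arithmetic used by
 * the new shmem_truncate_range(); a 4096-byte page is assumed here. */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

int main(void)
{
	unsigned long lstart = 6000;   /* first byte to remove */
	unsigned long lend = 20479;    /* last byte, page-aligned end (offset 4095) */

	/* First whole page index inside the range: round lstart up. */
	unsigned long start = (lstart + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;
	/* Offset within the page containing lstart that must be zeroed. */
	unsigned long partial = lstart & (DEMO_PAGE_SIZE - 1);
	/* Last page index covered by the range. */
	unsigned long end = lend >> DEMO_PAGE_SHIFT;

	printf("start index %lu, partial offset %lu, end index %lu\n",
	       start, partial, end);
	/* Expect: start 2 (page 1 is only partially covered), partial 1904, end 4. */
	return 0;
}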
@@ -780,37 +520,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
loff_t oldsize = inode->i_size;
loff_t newsize = attr->ia_size;
- struct page *page = NULL;
- if (newsize < oldsize) {
- /*
- * If truncating down to a partial page, then
- * if that page is already allocated, hold it
- * in memory until the truncation is over, so
- * truncate_partial_page cannot miss it were
- * it assigned to swap.
- */
- if (newsize & (PAGE_CACHE_SIZE-1)) {
- (void) shmem_getpage(inode,
- newsize >> PAGE_CACHE_SHIFT,
- &page, SGP_READ, NULL);
- if (page)
- unlock_page(page);
- }
- /*
- * Reset SHMEM_PAGEIN flag so that shmem_truncate can
- * detect if any pages might have been added to cache
- * after truncate_inode_pages. But we needn't bother
- * if it's being fully truncated to zero-length: the
- * nrpages check is efficient enough in that case.
- */
- if (newsize) {
- struct shmem_inode_info *info = SHMEM_I(inode);
- spin_lock(&info->lock);
- info->flags &= ~SHMEM_PAGEIN;
- spin_unlock(&info->lock);
- }
- }
if (newsize != oldsize) {
i_size_write(inode, newsize);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -822,8 +532,6 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
/* unmap again to remove racily COWed private pages */
unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
}
- if (page)
- page_cache_release(page);
}
setattr_copy(inode, attr);
@@ -848,7 +556,8 @@ static void shmem_evict_inode(struct inode *inode)
list_del_init(&info->swaplist);
mutex_unlock(&shmem_swaplist_mutex);
}
- }
+ } else
+ kfree(info->symlink);
list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
kfree(xattr->name);
@@ -859,106 +568,27 @@ static void shmem_evict_inode(struct inode *inode)
end_writeback(inode);
}
-static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
-{
- swp_entry_t *ptr;
-
- for (ptr = dir; ptr < edir; ptr++) {
- if (ptr->val == entry.val)
- return ptr - dir;
- }
- return -1;
-}
-
-static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
+/*
+ * If swap found in inode, free it and move page from swapcache to filecache.
+ */
+static int shmem_unuse_inode(struct shmem_inode_info *info,
+ swp_entry_t swap, struct page *page)
{
- struct address_space *mapping;
- unsigned long idx;
- unsigned long size;
- unsigned long limit;
- unsigned long stage;
- struct page **dir;
- struct page *subdir;
- swp_entry_t *ptr;
- int offset;
+ struct address_space *mapping = info->vfs_inode.i_mapping;
+ void *radswap;
+ pgoff_t index;
int error;
- idx = 0;
- ptr = info->i_direct;
- spin_lock(&info->lock);
- if (!info->swapped) {
- list_del_init(&info->swaplist);
- goto lost2;
- }
- limit = info->next_index;
- size = limit;
- if (size > SHMEM_NR_DIRECT)
- size = SHMEM_NR_DIRECT;
- offset = shmem_find_swp(entry, ptr, ptr+size);
- if (offset >= 0) {
- shmem_swp_balance_unmap();
- goto found;
- }
- if (!info->i_indirect)
- goto lost2;
-
- dir = shmem_dir_map(info->i_indirect);
- stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
-
- for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
- if (unlikely(idx == stage)) {
- shmem_dir_unmap(dir-1);
- if (cond_resched_lock(&info->lock)) {
- /* check it has not been truncated */
- if (limit > info->next_index) {
- limit = info->next_index;
- if (idx >= limit)
- goto lost2;
- }
- }
- dir = shmem_dir_map(info->i_indirect) +
- ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
- while (!*dir) {
- dir++;
- idx += ENTRIES_PER_PAGEPAGE;
- if (idx >= limit)
- goto lost1;
- }
- stage = idx + ENTRIES_PER_PAGEPAGE;
- subdir = *dir;
- shmem_dir_unmap(dir);
- dir = shmem_dir_map(subdir);
- }
- subdir = *dir;
- if (subdir && page_private(subdir)) {
- ptr = shmem_swp_map(subdir);
- size = limit - idx;
- if (size > ENTRIES_PER_PAGE)
- size = ENTRIES_PER_PAGE;
- offset = shmem_find_swp(entry, ptr, ptr+size);
- shmem_swp_unmap(ptr);
- if (offset >= 0) {
- shmem_dir_unmap(dir);
- ptr = shmem_swp_map(subdir);
- goto found;
- }
- }
- }
-lost1:
- shmem_dir_unmap(dir-1);
-lost2:
- spin_unlock(&info->lock);
- return 0;
-found:
- idx += offset;
- ptr += offset;
+ radswap = swp_to_radix_entry(swap);
+ index = radix_tree_locate_item(&mapping->page_tree, radswap);
+ if (index == -1)
+ return 0;
/*
* Move _head_ to start search for next from here.
* But be careful: shmem_evict_inode checks list_empty without taking
* mutex, and there's an instant in list_move_tail when info->swaplist
- * would appear empty, if it were the only one on shmem_swaplist. We
- * could avoid doing it if inode NULL; or use this minor optimization.
+ * would appear empty, if it were the only one on shmem_swaplist.
*/
if (shmem_swaplist.next != &info->swaplist)
list_move_tail(&shmem_swaplist, &info->swaplist);
@@ -968,29 +598,34 @@ found:
* but also to hold up shmem_evict_inode(): so inode cannot be freed
* beneath us (pagelock doesn't help until the page is in pagecache).
*/
- mapping = info->vfs_inode.i_mapping;
- error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
+ error = shmem_add_to_page_cache(page, mapping, index,
+ GFP_NOWAIT, radswap);
/* which does mem_cgroup_uncharge_cache_page on error */
if (error != -ENOMEM) {
+ /*
+ * Truncation and eviction use free_swap_and_cache(), which
+ * only does trylock page: if we raced, best clean up here.
+ */
delete_from_swap_cache(page);
set_page_dirty(page);
- info->flags |= SHMEM_PAGEIN;
- shmem_swp_set(info, ptr, 0);
- swap_free(entry);
+ if (!error) {
+ spin_lock(&info->lock);
+ info->swapped--;
+ spin_unlock(&info->lock);
+ swap_free(swap);
+ }
error = 1; /* not an error, but entry was found */
}
- shmem_swp_unmap(ptr);
- spin_unlock(&info->lock);
return error;
}
/*
- * shmem_unuse() search for an eventually swapped out shmem page.
+ * Search through swapped inodes to find and replace swap by page.
*/
-int shmem_unuse(swp_entry_t entry, struct page *page)
+int shmem_unuse(swp_entry_t swap, struct page *page)
{
- struct list_head *p, *next;
+ struct list_head *this, *next;
struct shmem_inode_info *info;
int found = 0;
int error;
@@ -999,32 +634,25 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
* Charge page using GFP_KERNEL while we can wait, before taking
* the shmem_swaplist_mutex which might hold up shmem_writepage().
* Charged back to the user (not to caller) when swap account is used.
- * add_to_page_cache() will be called with GFP_NOWAIT.
*/
error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
if (error)
goto out;
- /*
- * Try to preload while we can wait, to not make a habit of
- * draining atomic reserves; but don't latch on to this cpu,
- * it's okay if sometimes we get rescheduled after this.
- */
- error = radix_tree_preload(GFP_KERNEL);
- if (error)
- goto uncharge;
- radix_tree_preload_end();
+ /* No radix_tree_preload: swap entry keeps a place for page in tree */
mutex_lock(&shmem_swaplist_mutex);
- list_for_each_safe(p, next, &shmem_swaplist) {
- info = list_entry(p, struct shmem_inode_info, swaplist);
- found = shmem_unuse_inode(info, entry, page);
+ list_for_each_safe(this, next, &shmem_swaplist) {
+ info = list_entry(this, struct shmem_inode_info, swaplist);
+ if (info->swapped)
+ found = shmem_unuse_inode(info, swap, page);
+ else
+ list_del_init(&info->swaplist);
cond_resched();
if (found)
break;
}
mutex_unlock(&shmem_swaplist_mutex);
-uncharge:
if (!found)
mem_cgroup_uncharge_cache_page(page);
if (found < 0)
@@ -1041,10 +669,10 @@ out:
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
struct shmem_inode_info *info;
- swp_entry_t *entry, swap;
struct address_space *mapping;
- unsigned long index;
struct inode *inode;
+ swp_entry_t swap;
+ pgoff_t index;
BUG_ON(!PageLocked(page));
mapping = page->mapping;
@@ -1073,50 +701,32 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
/*
* Add inode to shmem_unuse()'s list of swapped-out inodes,
- * if it's not already there. Do it now because we cannot take
- * mutex while holding spinlock, and must do so before the page
- * is moved to swap cache, when its pagelock no longer protects
+ * if it's not already there. Do it now before the page is
+ * moved to swap cache, when its pagelock no longer protects
* the inode from eviction. But don't unlock the mutex until
- * we've taken the spinlock, because shmem_unuse_inode() will
- * prune a !swapped inode from the swaplist under both locks.
+ * we've incremented swapped, because shmem_unuse_inode() will
+ * prune a !swapped inode from the swaplist under this mutex.
*/
mutex_lock(&shmem_swaplist_mutex);
if (list_empty(&info->swaplist))
list_add_tail(&info->swaplist, &shmem_swaplist);
- spin_lock(&info->lock);
- mutex_unlock(&shmem_swaplist_mutex);
-
- if (index >= info->next_index) {
- BUG_ON(!(info->flags & SHMEM_TRUNCATE));
- goto unlock;
- }
- entry = shmem_swp_entry(info, index, NULL);
- if (entry->val) {
- WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
- free_swap_and_cache(*entry);
- shmem_swp_set(info, entry, 0);
- }
- shmem_recalc_inode(inode);
-
if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
- delete_from_page_cache(page);
- shmem_swp_set(info, entry, swap.val);
- shmem_swp_unmap(entry);
swap_shmem_alloc(swap);
+ shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
+
+ spin_lock(&info->lock);
+ info->swapped++;
+ shmem_recalc_inode(inode);
spin_unlock(&info->lock);
+
+ mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(page_mapped(page));
swap_writepage(page, wbc);
return 0;
}
- shmem_swp_unmap(entry);
-unlock:
- spin_unlock(&info->lock);
- /*
- * add_to_swap_cache() doesn't return -EEXIST, so we can safely
- * clear SWAP_HAS_CACHE flag.
- */
+ mutex_unlock(&shmem_swaplist_mutex);
swapcache_free(swap, NULL);
redirty:
set_page_dirty(page);
@@ -1153,35 +763,33 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
}
#endif /* CONFIG_TMPFS */
-static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
- struct shmem_inode_info *info, unsigned long idx)
+static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
+ struct shmem_inode_info *info, pgoff_t index)
{
struct mempolicy mpol, *spol;
struct vm_area_struct pvma;
- struct page *page;
spol = mpol_cond_copy(&mpol,
- mpol_shared_policy_lookup(&info->policy, idx));
+ mpol_shared_policy_lookup(&info->policy, index));
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
- pvma.vm_pgoff = idx;
+ pvma.vm_pgoff = index;
pvma.vm_ops = NULL;
pvma.vm_policy = spol;
- page = swapin_readahead(entry, gfp, &pvma, 0);
- return page;
+ return swapin_readahead(swap, gfp, &pvma, 0);
}
static struct page *shmem_alloc_page(gfp_t gfp,
- struct shmem_inode_info *info, unsigned long idx)
+ struct shmem_inode_info *info, pgoff_t index)
{
struct vm_area_struct pvma;
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
- pvma.vm_pgoff = idx;
+ pvma.vm_pgoff = index;
pvma.vm_ops = NULL;
- pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
/*
* alloc_page_vma() will drop the shared policy reference
@@ -1190,19 +798,19 @@ static struct page *shmem_alloc_page(gfp_t gfp,
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
-static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
+static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */
-static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
- struct shmem_inode_info *info, unsigned long idx)
+static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
+ struct shmem_inode_info *info, pgoff_t index)
{
- return swapin_readahead(entry, gfp, NULL, 0);
+ return swapin_readahead(swap, gfp, NULL, 0);
}
static inline struct page *shmem_alloc_page(gfp_t gfp,
- struct shmem_inode_info *info, unsigned long idx)
+ struct shmem_inode_info *info, pgoff_t index)
{
return alloc_page(gfp);
}
@@ -1222,243 +830,190 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
* vm. If we swap it in we mark it dirty since we also free the swap
* entry since a page cannot live in both the swap and page cache
*/
-static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
+static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
struct address_space *mapping = inode->i_mapping;
- struct shmem_inode_info *info = SHMEM_I(inode);
+ struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo;
struct page *page;
- struct page *prealloc_page = NULL;
- swp_entry_t *entry;
swp_entry_t swap;
int error;
- int ret;
+ int once = 0;
- if (idx >= SHMEM_MAX_INDEX)
+ if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
return -EFBIG;
repeat:
- page = find_lock_page(mapping, idx);
- if (page) {
+ swap.val = 0;
+ page = find_lock_page(mapping, index);
+ if (radix_tree_exceptional_entry(page)) {
+ swap = radix_to_swp_entry(page);
+ page = NULL;
+ }
+
+ if (sgp != SGP_WRITE &&
+ ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+ error = -EINVAL;
+ goto failed;
+ }
+
+ if (page || (sgp == SGP_READ && !swap.val)) {
/*
* Once we can get the page lock, it must be uptodate:
* if there were an error in reading back from swap,
* the page would not be inserted into the filecache.
*/
- BUG_ON(!PageUptodate(page));
- goto done;
+ BUG_ON(page && !PageUptodate(page));
+ *pagep = page;
+ return 0;
}
/*
- * Try to preload while we can wait, to not make a habit of
- * draining atomic reserves; but don't latch on to this cpu.
+ * Fast cache lookup did not find it:
+ * bring it back from swap or allocate.
*/
- error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
- if (error)
- goto out;
- radix_tree_preload_end();
-
- if (sgp != SGP_READ && !prealloc_page) {
- prealloc_page = shmem_alloc_page(gfp, info, idx);
- if (prealloc_page) {
- SetPageSwapBacked(prealloc_page);
- if (mem_cgroup_cache_charge(prealloc_page,
- current->mm, GFP_KERNEL)) {
- page_cache_release(prealloc_page);
- prealloc_page = NULL;
- }
- }
- }
-
- spin_lock(&info->lock);
- shmem_recalc_inode(inode);
- entry = shmem_swp_alloc(info, idx, sgp, gfp);
- if (IS_ERR(entry)) {
- spin_unlock(&info->lock);
- error = PTR_ERR(entry);
- goto out;
- }
- swap = *entry;
+ info = SHMEM_I(inode);
+ sbinfo = SHMEM_SB(inode->i_sb);
if (swap.val) {
/* Look it up and read it in.. */
page = lookup_swap_cache(swap);
if (!page) {
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
/* here we actually do the io */
if (fault_type)
*fault_type |= VM_FAULT_MAJOR;
- page = shmem_swapin(swap, gfp, info, idx);
+ page = shmem_swapin(swap, gfp, info, index);
if (!page) {
- spin_lock(&info->lock);
- entry = shmem_swp_alloc(info, idx, sgp, gfp);
- if (IS_ERR(entry))
- error = PTR_ERR(entry);
- else {
- if (entry->val == swap.val)
- error = -ENOMEM;
- shmem_swp_unmap(entry);
- }
- spin_unlock(&info->lock);
- if (error)
- goto out;
- goto repeat;
+ error = -ENOMEM;
+ goto failed;
}
- wait_on_page_locked(page);
- page_cache_release(page);
- goto repeat;
}
/* We have to do this with page locked to prevent races */
- if (!trylock_page(page)) {
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- wait_on_page_locked(page);
- page_cache_release(page);
- goto repeat;
- }
- if (PageWriteback(page)) {
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- wait_on_page_writeback(page);
- unlock_page(page);
- page_cache_release(page);
- goto repeat;
- }
+ lock_page(page);
if (!PageUptodate(page)) {
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- unlock_page(page);
- page_cache_release(page);
error = -EIO;
- goto out;
+ goto failed;
}
-
- error = add_to_page_cache_locked(page, mapping,
- idx, GFP_NOWAIT);
- if (error) {
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- if (error == -ENOMEM) {
- /*
- * reclaim from proper memory cgroup and
- * call memcg's OOM if needed.
- */
- error = mem_cgroup_shmem_charge_fallback(
- page, current->mm, gfp);
- if (error) {
- unlock_page(page);
- page_cache_release(page);
- goto out;
- }
- }
- unlock_page(page);
- page_cache_release(page);
- goto repeat;
+ wait_on_page_writeback(page);
+
+ /* Someone may have already done it for us */
+ if (page->mapping) {
+ if (page->mapping == mapping &&
+ page->index == index)
+ goto done;
+ error = -EEXIST;
+ goto failed;
}
- info->flags |= SHMEM_PAGEIN;
- shmem_swp_set(info, entry, 0);
- shmem_swp_unmap(entry);
- delete_from_swap_cache(page);
+ error = mem_cgroup_cache_charge(page, current->mm,
+ gfp & GFP_RECLAIM_MASK);
+ if (!error)
+ error = shmem_add_to_page_cache(page, mapping, index,
+ gfp, swp_to_radix_entry(swap));
+ if (error)
+ goto failed;
+
+ spin_lock(&info->lock);
+ info->swapped--;
+ shmem_recalc_inode(inode);
spin_unlock(&info->lock);
+
+ delete_from_swap_cache(page);
set_page_dirty(page);
swap_free(swap);
- } else if (sgp == SGP_READ) {
- shmem_swp_unmap(entry);
- page = find_get_page(mapping, idx);
- if (page && !trylock_page(page)) {
- spin_unlock(&info->lock);
- wait_on_page_locked(page);
- page_cache_release(page);
- goto repeat;
+ } else {
+ if (shmem_acct_block(info->flags)) {
+ error = -ENOSPC;
+ goto failed;
}
- spin_unlock(&info->lock);
-
- } else if (prealloc_page) {
- shmem_swp_unmap(entry);
- sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks) {
if (percpu_counter_compare(&sbinfo->used_blocks,
- sbinfo->max_blocks) >= 0 ||
- shmem_acct_block(info->flags))
- goto nospace;
+ sbinfo->max_blocks) >= 0) {
+ error = -ENOSPC;
+ goto unacct;
+ }
percpu_counter_inc(&sbinfo->used_blocks);
- inode->i_blocks += BLOCKS_PER_PAGE;
- } else if (shmem_acct_block(info->flags))
- goto nospace;
-
- page = prealloc_page;
- prealloc_page = NULL;
-
- entry = shmem_swp_alloc(info, idx, sgp, gfp);
- if (IS_ERR(entry))
- error = PTR_ERR(entry);
- else {
- swap = *entry;
- shmem_swp_unmap(entry);
}
- ret = error || swap.val;
- if (ret)
- mem_cgroup_uncharge_cache_page(page);
- else
- ret = add_to_page_cache_lru(page, mapping,
- idx, GFP_NOWAIT);
- /*
- * At add_to_page_cache_lru() failure,
- * uncharge will be done automatically.
- */
- if (ret) {
- shmem_unacct_blocks(info->flags, 1);
- shmem_free_blocks(inode, 1);
- spin_unlock(&info->lock);
- page_cache_release(page);
- if (error)
- goto out;
- goto repeat;
+
+ page = shmem_alloc_page(gfp, info, index);
+ if (!page) {
+ error = -ENOMEM;
+ goto decused;
}
- info->flags |= SHMEM_PAGEIN;
+ SetPageSwapBacked(page);
+ __set_page_locked(page);
+ error = mem_cgroup_cache_charge(page, current->mm,
+ gfp & GFP_RECLAIM_MASK);
+ if (!error)
+ error = shmem_add_to_page_cache(page, mapping, index,
+ gfp, NULL);
+ if (error)
+ goto decused;
+ lru_cache_add_anon(page);
+
+ spin_lock(&info->lock);
info->alloced++;
+ inode->i_blocks += BLOCKS_PER_PAGE;
+ shmem_recalc_inode(inode);
spin_unlock(&info->lock);
+
clear_highpage(page);
flush_dcache_page(page);
SetPageUptodate(page);
if (sgp == SGP_DIRTY)
set_page_dirty(page);
-
- } else {
- spin_unlock(&info->lock);
- error = -ENOMEM;
- goto out;
}
done:
- *pagep = page;
- error = 0;
-out:
- if (prealloc_page) {
- mem_cgroup_uncharge_cache_page(prealloc_page);
- page_cache_release(prealloc_page);
+ /* Perhaps the file has been truncated since we checked */
+ if (sgp != SGP_WRITE &&
+ ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+ error = -EINVAL;
+ goto trunc;
}
- return error;
+ *pagep = page;
+ return 0;
-nospace:
/*
- * Perhaps the page was brought in from swap between find_lock_page
- * and taking info->lock? We allow for that at add_to_page_cache_lru,
- * but must also avoid reporting a spurious ENOSPC while working on a
- * full tmpfs.
+ * Error recovery.
*/
- page = find_get_page(mapping, idx);
+trunc:
+ ClearPageDirty(page);
+ delete_from_page_cache(page);
+ spin_lock(&info->lock);
+ info->alloced--;
+ inode->i_blocks -= BLOCKS_PER_PAGE;
spin_unlock(&info->lock);
+decused:
+ if (sbinfo->max_blocks)
+ percpu_counter_add(&sbinfo->used_blocks, -1);
+unacct:
+ shmem_unacct_blocks(info->flags, 1);
+failed:
+ if (swap.val && error != -EINVAL) {
+ struct page *test = find_get_page(mapping, index);
+ if (test && !radix_tree_exceptional_entry(test))
+ page_cache_release(test);
+ /* Have another try if the entry has changed */
+ if (test != swp_to_radix_entry(swap))
+ error = -EEXIST;
+ }
if (page) {
+ unlock_page(page);
page_cache_release(page);
+ }
+ if (error == -ENOSPC && !once++) {
+ info = SHMEM_I(inode);
+ spin_lock(&info->lock);
+ shmem_recalc_inode(inode);
+ spin_unlock(&info->lock);
goto repeat;
}
- error = -ENOSPC;
- goto out;
+ if (error == -EEXIST)
+ goto repeat;
+ return error;
}
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -1467,9 +1022,6 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int error;
int ret = VM_FAULT_LOCKED;
- if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
- return VM_FAULT_SIGBUS;
-
error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
if (error)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1482,20 +1034,20 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
#ifdef CONFIG_NUMA
-static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
+static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
- struct inode *i = vma->vm_file->f_path.dentry->d_inode;
- return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}
static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr)
{
- struct inode *i = vma->vm_file->f_path.dentry->d_inode;
- unsigned long idx;
+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ pgoff_t index;
- idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
+ index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif
@@ -1593,7 +1145,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
-static const struct inode_operations shmem_symlink_inline_operations;
+static const struct inode_operations shmem_short_symlink_operations;
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
@@ -1626,7 +1178,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
{
struct inode *inode = filp->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
- unsigned long index, offset;
+ pgoff_t index;
+ unsigned long offset;
enum sgp_type sgp = SGP_READ;
/*
@@ -1642,7 +1195,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
for (;;) {
struct page *page = NULL;
- unsigned long end_index, nr, ret;
+ pgoff_t end_index;
+ unsigned long nr, ret;
loff_t i_size = i_size_read(inode);
end_index = i_size >> PAGE_CACHE_SHIFT;
@@ -1880,8 +1434,9 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = NAME_MAX;
if (sbinfo->max_blocks) {
buf->f_blocks = sbinfo->max_blocks;
- buf->f_bavail = buf->f_bfree =
- sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks);
+ buf->f_bavail =
+ buf->f_bfree = sbinfo->max_blocks -
+ percpu_counter_sum(&sbinfo->used_blocks);
}
if (sbinfo->max_inodes) {
buf->f_files = sbinfo->max_inodes;
@@ -2055,10 +1610,13 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
info = SHMEM_I(inode);
inode->i_size = len-1;
- if (len <= SHMEM_SYMLINK_INLINE_LEN) {
- /* do it inline */
- memcpy(info->inline_symlink, symname, len);
- inode->i_op = &shmem_symlink_inline_operations;
+ if (len <= SHORT_SYMLINK_LEN) {
+ info->symlink = kmemdup(symname, len, GFP_KERNEL);
+ if (!info->symlink) {
+ iput(inode);
+ return -ENOMEM;
+ }
+ inode->i_op = &shmem_short_symlink_operations;
} else {
error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
if (error) {
@@ -2081,17 +1639,17 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
return 0;
}
-static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
+static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
- nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
+ nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
return NULL;
}
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct page *page = NULL;
- int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
- nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
+ int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
+ nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
if (page)
unlock_page(page);
return page;
@@ -2202,7 +1760,6 @@ out:
return err;
}
-
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
&generic_acl_access_handler,
@@ -2332,9 +1889,9 @@ static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
}
#endif /* CONFIG_TMPFS_XATTR */
-static const struct inode_operations shmem_symlink_inline_operations = {
+static const struct inode_operations shmem_short_symlink_operations = {
.readlink = generic_readlink,
- .follow_link = shmem_follow_link_inline,
+ .follow_link = shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
@@ -2534,8 +2091,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
if (config.max_inodes < inodes)
goto out;
/*
- * Those tests also disallow limited->unlimited while any are in
- * use, so i_blocks will always be zero when max_blocks is zero;
+ * Those tests disallow limited->unlimited while any are in use;
* but we must separately disallow unlimited->limited, because
* in that case we have no record of how much is already in use.
*/
@@ -2627,7 +2183,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
goto failed;
sbinfo->free_inodes = sbinfo->max_inodes;
- sb->s_maxbytes = SHMEM_MAX_BYTES;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = TMPFS_MAGIC;
@@ -2662,14 +2218,14 @@ static struct kmem_cache *shmem_inode_cachep;
static struct inode *shmem_alloc_inode(struct super_block *sb)
{
- struct shmem_inode_info *p;
- p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
- if (!p)
+ struct shmem_inode_info *info;
+ info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
+ if (!info)
return NULL;
- return &p->vfs_inode;
+ return &info->vfs_inode;
}
-static void shmem_i_callback(struct rcu_head *head)
+static void shmem_destroy_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
INIT_LIST_HEAD(&inode->i_dentry);
@@ -2678,29 +2234,26 @@ static void shmem_i_callback(struct rcu_head *head)
static void shmem_destroy_inode(struct inode *inode)
{
- if ((inode->i_mode & S_IFMT) == S_IFREG) {
- /* only struct inode is valid if it's an inline symlink */
+ if ((inode->i_mode & S_IFMT) == S_IFREG)
mpol_free_shared_policy(&SHMEM_I(inode)->policy);
- }
- call_rcu(&inode->i_rcu, shmem_i_callback);
+ call_rcu(&inode->i_rcu, shmem_destroy_callback);
}
-static void init_once(void *foo)
+static void shmem_init_inode(void *foo)
{
- struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
-
- inode_init_once(&p->vfs_inode);
+ struct shmem_inode_info *info = foo;
+ inode_init_once(&info->vfs_inode);
}
-static int init_inodecache(void)
+static int shmem_init_inodecache(void)
{
shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
sizeof(struct shmem_inode_info),
- 0, SLAB_PANIC, init_once);
+ 0, SLAB_PANIC, shmem_init_inode);
return 0;
}
-static void destroy_inodecache(void)
+static void shmem_destroy_inodecache(void)
{
kmem_cache_destroy(shmem_inode_cachep);
}
@@ -2797,21 +2350,20 @@ static const struct vm_operations_struct shmem_vm_ops = {
#endif
};
-
static struct dentry *shmem_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_nodev(fs_type, flags, data, shmem_fill_super);
}
-static struct file_system_type tmpfs_fs_type = {
+static struct file_system_type shmem_fs_type = {
.owner = THIS_MODULE,
.name = "tmpfs",
.mount = shmem_mount,
.kill_sb = kill_litter_super,
};
-int __init init_tmpfs(void)
+int __init shmem_init(void)
{
int error;
@@ -2819,18 +2371,18 @@ int __init init_tmpfs(void)
if (error)
goto out4;
- error = init_inodecache();
+ error = shmem_init_inodecache();
if (error)
goto out3;
- error = register_filesystem(&tmpfs_fs_type);
+ error = register_filesystem(&shmem_fs_type);
if (error) {
printk(KERN_ERR "Could not register tmpfs\n");
goto out2;
}
- shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
- tmpfs_fs_type.name, NULL);
+ shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
+ shmem_fs_type.name, NULL);
if (IS_ERR(shm_mnt)) {
error = PTR_ERR(shm_mnt);
printk(KERN_ERR "Could not kern_mount tmpfs\n");
@@ -2839,9 +2391,9 @@ int __init init_tmpfs(void)
return 0;
out1:
- unregister_filesystem(&tmpfs_fs_type);
+ unregister_filesystem(&shmem_fs_type);
out2:
- destroy_inodecache();
+ shmem_destroy_inodecache();
out3:
bdi_destroy(&shmem_backing_dev_info);
out4:
@@ -2849,45 +2401,6 @@ out4:
return error;
}
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-/**
- * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
- * @inode: the inode to be searched
- * @pgoff: the offset to be searched
- * @pagep: the pointer for the found page to be stored
- * @ent: the pointer for the found swap entry to be stored
- *
- * If a page is found, refcount of it is incremented. Callers should handle
- * these refcount.
- */
-void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
- struct page **pagep, swp_entry_t *ent)
-{
- swp_entry_t entry = { .val = 0 }, *ptr;
- struct page *page = NULL;
- struct shmem_inode_info *info = SHMEM_I(inode);
-
- if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
- goto out;
-
- spin_lock(&info->lock);
- ptr = shmem_swp_entry(info, pgoff, NULL);
-#ifdef CONFIG_SWAP
- if (ptr && ptr->val) {
- entry.val = ptr->val;
- page = find_get_page(&swapper_space, entry.val);
- } else
-#endif
- page = find_get_page(inode->i_mapping, pgoff);
- if (ptr)
- shmem_swp_unmap(ptr);
- spin_unlock(&info->lock);
-out:
- *pagep = page;
- *ent = entry;
-}
-#endif
-
#else /* !CONFIG_SHMEM */
/*
@@ -2901,23 +2414,23 @@ out:
#include <linux/ramfs.h>
-static struct file_system_type tmpfs_fs_type = {
+static struct file_system_type shmem_fs_type = {
.name = "tmpfs",
.mount = ramfs_mount,
.kill_sb = kill_litter_super,
};
-int __init init_tmpfs(void)
+int __init shmem_init(void)
{
- BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
+ BUG_ON(register_filesystem(&shmem_fs_type) != 0);
- shm_mnt = kern_mount(&tmpfs_fs_type);
+ shm_mnt = kern_mount(&shmem_fs_type);
BUG_ON(IS_ERR(shm_mnt));
return 0;
}
-int shmem_unuse(swp_entry_t entry, struct page *page)
+int shmem_unuse(swp_entry_t swap, struct page *page)
{
return 0;
}
@@ -2927,43 +2440,17 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
return 0;
}
-void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
- truncate_inode_pages_range(inode->i_mapping, start, end);
+ truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-/**
- * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
- * @inode: the inode to be searched
- * @pgoff: the offset to be searched
- * @pagep: the pointer for the found page to be stored
- * @ent: the pointer for the found swap entry to be stored
- *
- * If a page is found, refcount of it is incremented. Callers should handle
- * these refcount.
- */
-void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
- struct page **pagep, swp_entry_t *ent)
-{
- struct page *page = NULL;
-
- if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
- goto out;
- page = find_get_page(inode->i_mapping, pgoff);
-out:
- *pagep = page;
- *ent = (swp_entry_t){ .val = 0 };
-}
-#endif
-
#define shmem_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size) 0
#define shmem_unacct_size(flags, size) do {} while (0)
-#define SHMEM_MAX_BYTES MAX_LFS_FILESIZE
#endif /* CONFIG_SHMEM */
@@ -2987,7 +2474,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
if (IS_ERR(shm_mnt))
return (void *)shm_mnt;
- if (size < 0 || size > SHMEM_MAX_BYTES)
+ if (size < 0 || size > MAX_LFS_FILESIZE)
return ERR_PTR(-EINVAL);
if (shmem_acct_size(flags, size))
diff --git a/mm/slab.c b/mm/slab.c
index 41fc5781c7c..5bfc2047afe 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -622,6 +622,51 @@ int slab_is_available(void)
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;
+static struct lock_class_key debugobj_l3_key;
+static struct lock_class_key debugobj_alc_key;
+
+static void slab_set_lock_classes(struct kmem_cache *cachep,
+ struct lock_class_key *l3_key, struct lock_class_key *alc_key,
+ int q)
+{
+ struct array_cache **alc;
+ struct kmem_list3 *l3;
+ int r;
+
+ l3 = cachep->nodelists[q];
+ if (!l3)
+ return;
+
+ lockdep_set_class(&l3->list_lock, l3_key);
+ alc = l3->alien;
+ /*
+ * FIXME: This check for BAD_ALIEN_MAGIC
+ * should go away when common slab code is taught to
+ * work even without alien caches.
+ * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+ * for alloc_alien_cache,
+ */
+ if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+ return;
+ for_each_node(r) {
+ if (alc[r])
+ lockdep_set_class(&alc[r]->lock, alc_key);
+ }
+}
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+ slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+ int node;
+
+ for_each_online_node(node)
+ slab_set_debugobj_lock_classes_node(cachep, node);
+}
+
static void init_node_lock_keys(int q)
{
struct cache_sizes *s = malloc_sizes;
@@ -630,29 +675,14 @@ static void init_node_lock_keys(int q)
return;
for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
- struct array_cache **alc;
struct kmem_list3 *l3;
- int r;
l3 = s->cs_cachep->nodelists[q];
if (!l3 || OFF_SLAB(s->cs_cachep))
continue;
- lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
- alc = l3->alien;
- /*
- * FIXME: This check for BAD_ALIEN_MAGIC
- * should go away when common slab code is taught to
- * work even without alien caches.
- * Currently, non NUMA code returns BAD_ALIEN_MAGIC
- * for alloc_alien_cache,
- */
- if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
- continue;
- for_each_node(r) {
- if (alc[r])
- lockdep_set_class(&alc[r]->lock,
- &on_slab_alc_key);
- }
+
+ slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+ &on_slab_alc_key, q);
}
}
@@ -671,6 +701,14 @@ static void init_node_lock_keys(int q)
static inline void init_lock_keys(void)
{
}
+
+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+}
+
+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
+{
+}
#endif
/*
@@ -1264,6 +1302,8 @@ static int __cpuinit cpuup_prepare(long cpu)
spin_unlock_irq(&l3->list_lock);
kfree(shared);
free_alien_cache(alien);
+ if (cachep->flags & SLAB_DEBUG_OBJECTS)
+ slab_set_debugobj_lock_classes_node(cachep, node);
}
init_node_lock_keys(node);
@@ -1626,6 +1666,9 @@ void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
+ /* Annotate slab for lockdep -- annotate the malloc caches */
+ init_lock_keys();
+
/* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
@@ -1636,9 +1679,6 @@ void __init kmem_cache_init_late(void)
/* Done! */
g_cpucache_up = FULL;
- /* Annotate slab for lockdep -- annotate the malloc caches */
- init_lock_keys();
-
/*
* Register a cpu startup notifier callback that initializes
* cpu_cache_get for all new cpus
@@ -2426,6 +2466,16 @@ kmem_cache_create (const char *name, size_t size, size_t align,
goto oops;
}
+ if (flags & SLAB_DEBUG_OBJECTS) {
+ /*
+ * Would deadlock through slab_destroy()->call_rcu()->
+ * debug_object_activate()->kmem_cache_alloc().
+ */
+ WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
+
+ slab_set_debugobj_lock_classes(cachep);
+ }
+
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
oops:
@@ -3398,7 +3448,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
- if (nodeid == -1)
+ if (nodeid == NUMA_NO_NODE)
nodeid = slab_node;
if (unlikely(!cachep->nodelists[nodeid])) {
@@ -3929,7 +3979,7 @@ fail:
struct ccupdate_struct {
struct kmem_cache *cachep;
- struct array_cache *new[NR_CPUS];
+ struct array_cache *new[0];
};
static void do_ccupdate_local(void *info)
@@ -3951,7 +4001,8 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
struct ccupdate_struct *new;
int i;
- new = kzalloc(sizeof(*new), gfp);
+ new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
+ gfp);
if (!new)
return -ENOMEM;
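
The ccupdate_struct change above replaces a fixed NR_CPUS-sized array with a zero-length trailing array sized at allocation time from nr_cpu_ids. A minimal sketch of that allocation pattern in plain C, using a C99 flexible array member rather than the kernel's zero-length-array idiom:

/* Sketch of the "size the trailing array at allocation time" pattern
 * the ccupdate_struct change switches to. */
#include <stdio.h>
#include <stdlib.h>

struct demo_update {
	const char *name;
	void *percpu[];         /* one slot per possible CPU */
};

int main(void)
{
	size_t nr_cpu_ids = 8;  /* stand-in for the runtime CPU count */
	struct demo_update *u;

	u = calloc(1, sizeof(*u) + nr_cpu_ids * sizeof(u->percpu[0]));
	if (!u)
		return 1;
	u->name = "demo";
	printf("%s: allocated %zu bytes for %zu per-cpu slots\n", u->name,
	       sizeof(*u) + nr_cpu_ids * sizeof(u->percpu[0]), nr_cpu_ids);
	free(u);
	return 0;
}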
diff --git a/mm/slub.c b/mm/slub.c
index 2dc22160aff..3b3f17bc0d1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -675,7 +675,7 @@ static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
return check_bytes8(start, value, bytes);
value64 = value | value << 8 | value << 16 | value << 24;
- value64 = value64 | value64 << 32;
+ value64 = (value64 & 0xffffffff) | value64 << 32;
prefix = 8 - ((unsigned long)start) % 8;
if (prefix) {
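
The check_bytes() hunk above masks the low 32 bits before combining because, for pattern bytes with the top bit set, "value << 24" yields an int with its sign bit set; assigning that to the 64-bit variable sign-extends it and corrupts the upper half of the repeated pattern. A standalone reproduction, assuming nothing beyond ordinary C integer promotion:

/* Reproduction of the sign-extension bug fixed in check_bytes(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t value = 0xa5;   /* any pattern byte >= 0x80 shows the bug */
	uint64_t value64 = value | value << 8 | value << 16 | value << 24;

	/* Buggy form: the upper 32 bits end up as 0xffffffff. */
	uint64_t buggy = value64 | value64 << 32;
	/* Fixed form from the patch: mask before combining. */
	uint64_t fixed = (value64 & 0xffffffff) | value64 << 32;

	printf("buggy pattern: 0x%016llx\n", (unsigned long long)buggy);
	printf("fixed pattern: 0x%016llx\n", (unsigned long long)fixed);
	return 0;
}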
@@ -1508,7 +1508,7 @@ static inline void add_partial(struct kmem_cache_node *n,
struct page *page, int tail)
{
n->nr_partial++;
- if (tail)
+ if (tail == DEACTIVATE_TO_TAIL)
list_add_tail(&page->lru, &n->partial);
else
list_add(&page->lru, &n->partial);
@@ -1755,13 +1755,13 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
enum slab_modes l = M_NONE, m = M_NONE;
void *freelist;
void *nextfree;
- int tail = 0;
+ int tail = DEACTIVATE_TO_HEAD;
struct page new;
struct page old;
if (page->freelist) {
stat(s, DEACTIVATE_REMOTE_FREES);
- tail = 1;
+ tail = DEACTIVATE_TO_TAIL;
}
c->tid = next_tid(c->tid);
@@ -1828,7 +1828,7 @@ redo:
new.frozen = 0;
- if (!new.inuse && n->nr_partial < s->min_partial)
+ if (!new.inuse && n->nr_partial > s->min_partial)
m = M_FREE;
else if (new.freelist) {
m = M_PARTIAL;
@@ -1867,7 +1867,7 @@ redo:
if (m == M_PARTIAL) {
add_partial(n, page, tail);
- stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+ stat(s, tail);
} else if (m == M_FULL) {
@@ -2351,7 +2351,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
*/
if (unlikely(!prior)) {
remove_full(s, page);
- add_partial(n, page, 0);
+ add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
}
@@ -2361,11 +2361,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
slab_empty:
if (prior) {
/*
- * Slab still on the partial list.
+ * Slab on the partial list.
*/
remove_partial(n, page);
stat(s, FREE_REMOVE_PARTIAL);
- }
+ } else
+ /* Slab must be on the full list */
+ remove_full(s, page);
spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
@@ -2667,7 +2669,7 @@ static void early_kmem_cache_node_alloc(int node)
init_kmem_cache_node(n, kmem_cache_node);
inc_slabs_node(kmem_cache_node, node, page->objects);
- add_partial(n, page, 0);
+ add_partial(n, page, DEACTIVATE_TO_HEAD);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1b8c3390724..17bc224bce6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1924,20 +1924,24 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
/*
* Find out how many pages are allowed for a single swap
- * device. There are two limiting factors: 1) the number of
- * bits for the swap offset in the swp_entry_t type and
- * 2) the number of bits in the a swap pte as defined by
- * the different architectures. In order to find the
- * largest possible bit mask a swap entry with swap type 0
+ * device. There are three limiting factors: 1) the number
+ * of bits for the swap offset in the swp_entry_t type, and
+ * 2) the number of bits in the swap pte as defined by the
+	 * different architectures, and 3) the number of free bits
+ * in an exceptional radix_tree entry. In order to find the
+ * largest possible bit mask, a swap entry with swap type 0
* and swap offset ~0UL is created, encoded to a swap pte,
- * decoded to a swp_entry_t again and finally the swap
+ * decoded to a swp_entry_t again, and finally the swap
* offset is extracted. This will mask all the bits from
* the initial ~0UL mask that can't be encoded in either
* the swp_entry_t or the architecture definition of a
- * swap pte.
+ * swap pte. Then the same is done for a radix_tree entry.
*/
maxpages = swp_offset(pte_to_swp_entry(
- swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+ swp_entry_to_pte(swp_entry(0, ~0UL))));
+ maxpages = swp_offset(radix_to_swp_entry(
+ swp_to_radix_entry(swp_entry(0, maxpages)))) + 1;
+
if (maxpages > swap_header->info.last_page) {
maxpages = swap_header->info.last_page + 1;
/* p->max is an unsigned int: don't overflow it */
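
The comment above describes finding the usable offset mask by encoding an all-ones offset and decoding it back through each narrower representation, so that bits which cannot survive the round trip are dropped; the patch adds the radix-tree entry as a third stage. A hedged sketch of the round-trip idea using a made-up bit-field encoding (the 5-bit type / 27-bit offset split is an assumption for the demo, not any real swap-pte layout):

/* Illustrative "round-trip ~0 through the narrower encoding" sketch. */
#include <stdio.h>

#define DEMO_TYPE_BITS   5
#define DEMO_OFFSET_BITS 27   /* assumed: narrower than unsigned long */

static unsigned long demo_encode(unsigned type, unsigned long offset)
{
	/* Offset bits beyond DEMO_OFFSET_BITS simply do not fit. */
	return ((offset & ((1UL << DEMO_OFFSET_BITS) - 1)) << DEMO_TYPE_BITS) | type;
}

static unsigned long demo_decode_offset(unsigned long encoded)
{
	return encoded >> DEMO_TYPE_BITS;
}

int main(void)
{
	/* Encode type 0 with an all-ones offset, decode it again: whatever
	 * survives is the largest offset this encoding can represent. */
	unsigned long maxpages = demo_decode_offset(demo_encode(0, ~0UL)) + 1;

	printf("largest representable offset + 1: %lu pages\n", maxpages);
	return 0;
}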
diff --git a/mm/truncate.c b/mm/truncate.c
index 232eb2736a7..b40ac6d4e86 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -336,6 +336,14 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
unsigned long count = 0;
int i;
+ /*
+ * Note: this function may get called on a shmem/tmpfs mapping:
+ * pagevec_lookup() might then return 0 prematurely (because it
+ * got a gangful of swap entries); but it's hardly worth worrying
+ * about - it can rarely have anything to free from such a mapping
+ * (most pages are dirty), and already skips over any difficulties.
+ */
+
pagevec_init(&pvec, 0);
while (index <= end && pagevec_lookup(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 2252c2085da..52cfd0c3ea7 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -242,8 +242,6 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
if (brdev->payload == p_bridged) {
skb_push(skb, 2);
memset(skb->data, 0, 2);
- } else { /* p_routed */
- skb_pull(skb, ETH_HLEN);
}
}
skb_debug(skb);
diff --git a/net/core/Makefile b/net/core/Makefile
index 8a04dd22cf7..0d357b1c4e5 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -3,7 +3,7 @@
#
obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
- gen_stats.o gen_estimator.o net_namespace.o
+ gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
new file mode 100644
index 00000000000..45329d7c9dd
--- /dev/null
+++ b/net/core/secure_seq.c
@@ -0,0 +1,184 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cryptohash.h>
+#include <linux/module.h>
+#include <linux/cache.h>
+#include <linux/random.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/string.h>
+
+#include <net/secure_seq.h>
+
+static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+
+static int __init net_secret_init(void)
+{
+ get_random_bytes(net_secret, sizeof(net_secret));
+ return 0;
+}
+late_initcall(net_secret_init);
+
+static u32 seq_scale(u32 seq)
+{
+ /*
+ * As close as possible to RFC 793, which
+ * suggests using a 250 kHz clock.
+ * Further reading shows this assumes 2 Mb/s networks.
+ * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+ * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
+ * we also need to limit the resolution so that the u32 seq
+ * overlaps less than one time per MSL (2 minutes).
+ * Choosing a clock of 64 ns period is OK. (period of 274 s)
+ */
+ return seq + (ktime_to_ns(ktime_get_real()) >> 6);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport)
+{
+ u32 secret[MD5_MESSAGE_BYTES / 4];
+ u32 hash[MD5_DIGEST_WORDS];
+ u32 i;
+
+ memcpy(hash, saddr, 16);
+ for (i = 0; i < 4; i++)
+ secret[i] = net_secret[i] + daddr[i];
+ secret[4] = net_secret[4] +
+ (((__force u16)sport << 16) + (__force u16)dport);
+ for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
+ secret[i] = net_secret[i];
+
+ md5_transform(hash, secret);
+
+ return seq_scale(hash[0]);
+}
+EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport)
+{
+ u32 secret[MD5_MESSAGE_BYTES / 4];
+ u32 hash[MD5_DIGEST_WORDS];
+ u32 i;
+
+ memcpy(hash, saddr, 16);
+ for (i = 0; i < 4; i++)
+ secret[i] = net_secret[i] + (__force u32) daddr[i];
+ secret[4] = net_secret[4] + (__force u32)dport;
+ for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
+ secret[i] = net_secret[i];
+
+ md5_transform(hash, secret);
+
+ return hash[0];
+}
+#endif
+
+#ifdef CONFIG_INET
+__u32 secure_ip_id(__be32 daddr)
+{
+ u32 hash[MD5_DIGEST_WORDS];
+
+ hash[0] = (__force __u32) daddr;
+ hash[1] = net_secret[13];
+ hash[2] = net_secret[14];
+ hash[3] = net_secret[15];
+
+ md5_transform(hash, net_secret);
+
+ return hash[0];
+}
+
+__u32 secure_ipv6_id(const __be32 daddr[4])
+{
+ __u32 hash[4];
+
+ memcpy(hash, daddr, 16);
+ md5_transform(hash, net_secret);
+
+ return hash[0];
+}
+
+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
+{
+ u32 hash[MD5_DIGEST_WORDS];
+
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+ hash[3] = net_secret[15];
+
+ md5_transform(hash, net_secret);
+
+ return seq_scale(hash[0]);
+}
+
+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+{
+ u32 hash[MD5_DIGEST_WORDS];
+
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = (__force u32)dport ^ net_secret[14];
+ hash[3] = net_secret[15];
+
+ md5_transform(hash, net_secret);
+
+ return hash[0];
+}
+EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
+#endif
+
+#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
+{
+ u32 hash[MD5_DIGEST_WORDS];
+ u64 seq;
+
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+ hash[3] = net_secret[15];
+
+ md5_transform(hash, net_secret);
+
+ seq = hash[0] | (((u64)hash[1]) << 32);
+ seq += ktime_to_ns(ktime_get_real());
+ seq &= (1ull << 48) - 1;
+
+ return seq;
+}
+EXPORT_SYMBOL(secure_dccp_sequence_number);
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport)
+{
+ u32 secret[MD5_MESSAGE_BYTES / 4];
+ u32 hash[MD5_DIGEST_WORDS];
+ u64 seq;
+ u32 i;
+
+ memcpy(hash, saddr, 16);
+ for (i = 0; i < 4; i++)
+ secret[i] = net_secret[i] + daddr[i];
+ secret[4] = net_secret[4] +
+ (((__force u16)sport << 16) + (__force u16)dport);
+ for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
+ secret[i] = net_secret[i];
+
+ md5_transform(hash, secret);
+
+ seq = hash[0] | (((u64)hash[1]) << 32);
+ seq += ktime_to_ns(ktime_get_real());
+ seq &= (1ull << 48) - 1;
+
+ return seq;
+}
+EXPORT_SYMBOL(secure_dccpv6_sequence_number);
+#endif
+#endif
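A quick sanity check of the 64 ns tick chosen in seq_scale() above: a u32 sequence value wraps after 2^32 ticks, i.e. 2^32 * 64 ns, roughly 274.9 s, which is comfortably longer than the 2 minute MSL the comment mentions. A tiny standalone calculation (plain C, nothing kernel-specific, numbers taken from the comment itself):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t tick_ns = 64;            /* seq_scale() shifts ns right by 6 */
	const uint64_t seq_space = 1ULL << 32;  /* u32 sequence numbers */
	double wrap_seconds = (double)seq_space * (double)tick_ns / 1e9;

	/* prints roughly 274.9 seconds per full wrap, versus a 120 s MSL */
	printf("u32 ISN clock wraps every %.1f seconds\n", wrap_seconds);
	return 0;
}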
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2beda824636..27002dffe7e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1369,8 +1369,21 @@ pull_pages:
}
EXPORT_SYMBOL(__pskb_pull_tail);
-/* Copy some data bits from skb to kernel buffer. */
-
+/**
+ * skb_copy_bits - copy bits from skb to kernel buffer
+ * @skb: source skb
+ * @offset: offset in source
+ * @to: destination buffer
+ * @len: number of bytes to copy
+ *
+ * Copy the specified number of bytes from the source skb to the
+ * destination buffer.
+ *
+ * CAUTION:
+ * If its prototype is ever changed,
+ * check arch/{*}/net/{*}.S files,
+ * since it is called from BPF assembly code.
+ */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
int start = skb_headlen(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8c36adfd191..332639b56f4 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -26,6 +26,7 @@
#include <net/timewait_sock.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
+#include <net/secure_seq.h>
#include "ackvec.h"
#include "ccid.h"
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 8dc4348774a..b74f76117dc 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -29,6 +29,7 @@
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
+#include <net/secure_seq.h>
#include "dccp.h"
#include "ipv6.h"
@@ -69,13 +70,7 @@ static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}
-static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
- __be16 sport, __be16 dport )
-{
- return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
-}
-
-static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
+static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
{
return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index f1d27f6c935..283c0a26e03 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1718,7 +1718,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
pmc->sfcount[sfmode]--;
for (j=0; j<i; j++)
- (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
+ (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
struct ip_sf_list *psf;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 3c0369a3a66..984ec656b03 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -21,6 +21,7 @@
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
+#include <net/secure_seq.h>
#include <net/ip.h>
/*
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e38213817d0..86f13c67ea8 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -19,6 +19,7 @@
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>
+#include <net/secure_seq.h>
/*
* Theory of operations.
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ccaaa851ab4..77d3eded665 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -204,9 +204,15 @@ static inline int ip_finish_output2(struct sk_buff *skb)
skb = skb2;
}
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
- if (neigh)
- return neigh_output(neigh, skb);
+ if (neigh) {
+ int res = neigh_output(neigh, skb);
+
+ rcu_read_unlock();
+ return res;
+ }
+ rcu_read_unlock();
if (net_ratelimit())
printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index 3e61faf23a9..f52d41ea069 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -12,6 +12,7 @@
#include <linux/ip.h>
#include <linux/netfilter.h>
+#include <net/secure_seq.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_rule.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 1730689f560..e3dec1c9f09 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -109,6 +109,7 @@
#include <linux/sysctl.h>
#endif
#include <net/atmclip.h>
+#include <net/secure_seq.h>
#define RT_FL_TOS(oldflp4) \
((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
@@ -1628,16 +1629,18 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
struct rtable *rt = (struct rtable *) dst;
__be32 orig_gw = rt->rt_gateway;
- struct neighbour *n;
+ struct neighbour *n, *old_n;
dst_confirm(&rt->dst);
- neigh_release(dst_get_neighbour(&rt->dst));
- dst_set_neighbour(&rt->dst, NULL);
-
rt->rt_gateway = peer->redirect_learned.a4;
- rt_bind_neighbour(rt);
- n = dst_get_neighbour(&rt->dst);
+
+ n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ old_n = xchg(&rt->dst._neighbour, n);
+ if (old_n)
+ neigh_release(old_n);
if (!n || !(n->nud_state & NUD_VALID)) {
if (n)
neigh_event_send(n, NULL);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 955b8e65b69..1c12b8ec849 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -72,6 +72,7 @@
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
+#include <net/secure_seq.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a55500cc0b2..f012ebd87b4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -656,7 +656,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
* layer address of our nexthop router
*/
- if (dst_get_neighbour(&rt->dst) == NULL)
+ if (dst_get_neighbour_raw(&rt->dst) == NULL)
ifa->flags &= ~IFA_F_OPTIMISTIC;
ifa->idev = idev;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 16560336eb7..9ef1831746e 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -33,6 +33,11 @@
#include <linux/errqueue.h>
#include <asm/uaccess.h>
+static inline int ipv6_mapped_addr_any(const struct in6_addr *a)
+{
+ return (ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0));
+}
+
int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
@@ -102,10 +107,12 @@ ipv4_connected:
ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr);
- if (ipv6_addr_any(&np->saddr))
+ if (ipv6_addr_any(&np->saddr) ||
+ ipv6_mapped_addr_any(&np->saddr))
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
- if (ipv6_addr_any(&np->rcv_saddr)) {
+ if (ipv6_addr_any(&np->rcv_saddr) ||
+ ipv6_mapped_addr_any(&np->rcv_saddr)) {
ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
&np->rcv_saddr);
if (sk->sk_prot->rehash)
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b5319723370..73f1a00a96a 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -20,6 +20,7 @@
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
+#include <net/secure_seq.h>
#include <net/ip.h>
int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 54a4678955b..320d91d20ad 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1455,7 +1455,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
RT6_TRACE("aging clone %p\n", rt);
return -1;
} else if ((rt->rt6i_flags & RTF_GATEWAY) &&
- (!(dst_get_neighbour(&rt->dst)->flags & NTF_ROUTER))) {
+ (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) {
RT6_TRACE("purging route %p via non-router but gateway\n",
rt);
return -1;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 32e5339db0c..4c882cf4e8a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -135,10 +135,15 @@ static int ip6_finish_output2(struct sk_buff *skb)
skb->len);
}
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
- if (neigh)
- return neigh_output(neigh, skb);
+ if (neigh) {
+ int res = neigh_output(neigh, skb);
+ rcu_read_unlock();
+ return res;
+ }
+ rcu_read_unlock();
IP6_INC_STATS_BH(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
@@ -975,12 +980,14 @@ static int ip6_dst_lookup_tail(struct sock *sk,
* dst entry and replace it instead with the
* dst entry of the nexthop router
*/
+ rcu_read_lock();
n = dst_get_neighbour(*dst);
if (n && !(n->nud_state & NUD_VALID)) {
struct inet6_ifaddr *ifp;
struct flowi6 fl_gw6;
int redirect;
+ rcu_read_unlock();
ifp = ipv6_get_ifaddr(net, &fl6->saddr,
(*dst)->dev, 1);
@@ -1000,6 +1007,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
if ((err = (*dst)->error))
goto out_err_release;
}
+ } else {
+ rcu_read_unlock();
}
#endif
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e8987da0666..9e69eb0ec6d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -364,7 +364,7 @@ out:
#ifdef CONFIG_IPV6_ROUTER_PREF
static void rt6_probe(struct rt6_info *rt)
{
- struct neighbour *neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
+ struct neighbour *neigh;
/*
* Okay, this does not seem to be appropriate
* for now, however, we need to check if it
@@ -373,8 +373,10 @@ static void rt6_probe(struct rt6_info *rt)
* Router Reachability Probe MUST be rate-limited
* to no more than one per minute.
*/
+ rcu_read_lock();
+ neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
if (!neigh || (neigh->nud_state & NUD_VALID))
- return;
+ goto out;
read_lock_bh(&neigh->lock);
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
@@ -387,8 +389,11 @@ static void rt6_probe(struct rt6_info *rt)
target = (struct in6_addr *)&neigh->primary_key;
addrconf_addr_solict_mult(target, &mcaddr);
ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
- } else
+ } else {
read_unlock_bh(&neigh->lock);
+ }
+out:
+ rcu_read_unlock();
}
#else
static inline void rt6_probe(struct rt6_info *rt)
@@ -412,8 +417,11 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
static inline int rt6_check_neigh(struct rt6_info *rt)
{
- struct neighbour *neigh = dst_get_neighbour(&rt->dst);
+ struct neighbour *neigh;
int m;
+
+ rcu_read_lock();
+ neigh = dst_get_neighbour(&rt->dst);
if (rt->rt6i_flags & RTF_NONEXTHOP ||
!(rt->rt6i_flags & RTF_GATEWAY))
m = 1;
@@ -430,6 +438,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
read_unlock_bh(&neigh->lock);
} else
m = 0;
+ rcu_read_unlock();
return m;
}
@@ -769,7 +778,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
rt->rt6i_dst.plen = 128;
rt->rt6i_flags |= RTF_CACHE;
rt->dst.flags |= DST_HOST;
- dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour(&ort->dst)));
+ dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
}
return rt;
}
@@ -803,7 +812,7 @@ restart:
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
- if (!dst_get_neighbour(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+ if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
else if (!(rt->dst.flags & DST_HOST))
nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -1587,7 +1596,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
dst_confirm(&rt->dst);
/* Duplicate redirect: silently ignore. */
- if (neigh == dst_get_neighbour(&rt->dst))
+ if (neigh == dst_get_neighbour_raw(&rt->dst))
goto out;
nrt = ip6_rt_copy(rt, dest);
@@ -1682,7 +1691,7 @@ again:
1. It is connected route. Action: COW
2. It is gatewayed route or NONEXTHOP route. Action: clone it.
*/
- if (!dst_get_neighbour(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+ if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, daddr, saddr);
else
nrt = rt6_alloc_clone(rt, daddr);
@@ -2326,6 +2335,7 @@ static int rt6_fill_node(struct net *net,
struct nlmsghdr *nlh;
long expires;
u32 table;
+ struct neighbour *n;
if (prefix) { /* user wants prefix routes only */
if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2414,8 +2424,11 @@ static int rt6_fill_node(struct net *net,
if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
goto nla_put_failure;
- if (dst_get_neighbour(&rt->dst))
- NLA_PUT(skb, RTA_GATEWAY, 16, &dst_get_neighbour(&rt->dst)->primary_key);
+ rcu_read_lock();
+ n = dst_get_neighbour(&rt->dst);
+ if (n)
+ NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
+ rcu_read_unlock();
if (rt->dst.dev)
NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
@@ -2608,12 +2621,14 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
#else
seq_puts(m, "00000000000000000000000000000000 00 ");
#endif
+ rcu_read_lock();
n = dst_get_neighbour(&rt->dst);
if (n) {
seq_printf(m, "%pi6", n->primary_key);
} else {
seq_puts(m, "00000000000000000000000000000000");
}
+ rcu_read_unlock();
seq_printf(m, " %08x %08x %08x %08x %8s\n",
rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
rt->dst.__use, rt->rt6i_flags,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 78aa53492b3..d1fb63f4aeb 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -61,6 +61,7 @@
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
+#include <net/secure_seq.h>
#include <asm/uaccess.h>
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index be43fd805bd..2b771dc708a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3771,6 +3771,7 @@ err_sock:
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
+ unregister_netdevice_notifier(&ip_vs_dst_notifier);
ip_vs_genl_unregister();
nf_unregister_sockopt(&ip_vs_sockopts);
LeaveFunction(2);
diff --git a/net/netlabel/Makefile b/net/netlabel/Makefile
index ea750e9df65..d2732fc952e 100644
--- a/net/netlabel/Makefile
+++ b/net/netlabel/Makefile
@@ -1,8 +1,6 @@
#
# Makefile for the NetLabel subsystem.
#
-# Feb 9, 2006, Paul Moore <paul.moore@hp.com>
-#
# base objects
obj-y := netlabel_user.o netlabel_kapi.o
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index c0519139679..96b749dacc3 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h
index 2b9644e19de..fdbc1d2c735 100644
--- a/net/netlabel/netlabel_addrlist.h
+++ b/net/netlabel/netlabel_addrlist.h
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index dd53a36d89a..6bf878335d9 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_cipso_v4.h b/net/netlabel/netlabel_cipso_v4.h
index af7f3355103..d24d774bfd6 100644
--- a/net/netlabel/netlabel_cipso_v4.h
+++ b/net/netlabel/netlabel_cipso_v4.h
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 2aa975e5452..7d8083cde34 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index 0261dda3f2d..bfcc0f7024c 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index b528dd928d3..58107d06084 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -5,7 +5,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index dff8a080924..bfa55586977 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_mgmt.h b/net/netlabel/netlabel_mgmt.h
index 8db37f4c10f..5a9f31ce579 100644
--- a/net/netlabel/netlabel_mgmt.h
+++ b/net/netlabel/netlabel_mgmt.h
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index f1ecf848e3a..e6e823656f9 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -5,7 +5,7 @@
* NetLabel system. The NetLabel system manages static and dynamic label
* mappings for network protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_unlabeled.h b/net/netlabel/netlabel_unlabeled.h
index 0bc8dc3f9e3..700af49022a 100644
--- a/net/netlabel/netlabel_unlabeled.h
+++ b/net/netlabel/netlabel_unlabeled.h
@@ -5,7 +5,7 @@
* NetLabel system. The NetLabel system manages static and dynamic label
* mappings for network protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index a3fd75ac3fa..9fae63f1029 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index f4fc4c9ad56..81969785e27 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 4536ee64383..4f5510e2bd6 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -410,7 +410,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* Return Congestion Notification only if we dropped a packet
* from this flow.
*/
- return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+ if (qlen != slot->qlen)
+ return NET_XMIT_CN;
+
+ /* Since we dropped a packet, let the upper layers know about it */
+ qdisc_tree_decrease_qlen(sch, 1);
+ return NET_XMIT_SUCCESS;
}
static struct sk_buff *
diff --git a/net/socket.c b/net/socket.c
index b1cbbcd9255..24a77400b65 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1871,8 +1871,14 @@ SYSCALL_DEFINE2(shutdown, int, fd, int, how)
#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen)
#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags)
+struct used_address {
+ struct sockaddr_storage name;
+ unsigned int name_len;
+};
+
static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
- struct msghdr *msg_sys, unsigned flags, int nosec)
+ struct msghdr *msg_sys, unsigned flags,
+ struct used_address *used_address)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
@@ -1953,8 +1959,28 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
if (sock->file->f_flags & O_NONBLOCK)
msg_sys->msg_flags |= MSG_DONTWAIT;
- err = (nosec ? sock_sendmsg_nosec : sock_sendmsg)(sock, msg_sys,
- total_len);
+ /*
+ * If this is sendmmsg() and the current destination address is the same
+ * as the previously successful one, skip asking for the LSM's decision.
+ * used_address->name_len is initialized to UINT_MAX so that the first
+ * destination address never matches.
+ */
+ if (used_address && used_address->name_len == msg_sys->msg_namelen &&
+ !memcmp(&used_address->name, msg->msg_name,
+ used_address->name_len)) {
+ err = sock_sendmsg_nosec(sock, msg_sys, total_len);
+ goto out_freectl;
+ }
+ err = sock_sendmsg(sock, msg_sys, total_len);
+ /*
+ * If this is sendmmsg() and sending to the current destination address was
+ * successful, remember it.
+ */
+ if (used_address && err >= 0) {
+ used_address->name_len = msg_sys->msg_namelen;
+ memcpy(&used_address->name, msg->msg_name,
+ used_address->name_len);
+ }
out_freectl:
if (ctl_buf != ctl)
@@ -1979,7 +2005,7 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
if (!sock)
goto out;
- err = __sys_sendmsg(sock, msg, &msg_sys, flags, 0);
+ err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
fput_light(sock->file, fput_needed);
out:
@@ -1998,6 +2024,10 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
struct mmsghdr __user *entry;
struct compat_mmsghdr __user *compat_entry;
struct msghdr msg_sys;
+ struct used_address used_address;
+
+ if (vlen > UIO_MAXIOV)
+ vlen = UIO_MAXIOV;
datagrams = 0;
@@ -2005,27 +2035,22 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
if (!sock)
return err;
- err = sock_error(sock->sk);
- if (err)
- goto out_put;
-
+ used_address.name_len = UINT_MAX;
entry = mmsg;
compat_entry = (struct compat_mmsghdr __user *)mmsg;
+ err = 0;
while (datagrams < vlen) {
- /*
- * No need to ask LSM for more than the first datagram.
- */
if (MSG_CMSG_COMPAT & flags) {
err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
- &msg_sys, flags, datagrams);
+ &msg_sys, flags, &used_address);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
- &msg_sys, flags, datagrams);
+ &msg_sys, flags, &used_address);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
@@ -2037,29 +2062,11 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
++datagrams;
}
-out_put:
fput_light(sock->file, fput_needed);
- if (err == 0)
- return datagrams;
-
- if (datagrams != 0) {
- /*
- * We may send less entries than requested (vlen) if the
- * sock is non blocking...
- */
- if (err != -EAGAIN) {
- /*
- * ... or if sendmsg returns an error after we
- * send some datagrams, where we record the
- * error to return on the next call or if the
- * app asks about it using getsockopt(SO_ERROR).
- */
- sock->sk->sk_err = -err;
- }
-
+ /* We only return an error if no datagrams could be sent at all */
+ if (datagrams != 0)
return datagrams;
- }
return err;
}
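The used_address handling added to __sys_sendmsg()/__sys_sendmmsg() above is a simple memoization of the last destination that was sent to successfully, so repeated datagrams to the same address skip the per-message security check. A minimal userspace sketch of the same pattern, with a hypothetical expensive_check() standing in for the LSM decision (all names below are illustrative, not kernel APIs):

#include <limits.h>
#include <stdio.h>
#include <string.h>

struct used_address {
	char name[64];
	unsigned int name_len;
};

/* Hypothetical stand-in for the costly per-message permission check. */
static int expensive_check(const char *name, unsigned int len)
{
	printf("expensive check for %.*s\n", (int)len, name);
	return 0;
}

/* Pretend transmit; returns bytes "sent". */
static int do_transmit(const char *name, unsigned int len)
{
	(void)name;
	return (int)len;
}

static int send_one(struct used_address *cache, const char *name,
		    unsigned int len)
{
	int err;

	/* Same destination as a previously successful send: skip the check. */
	if (cache->name_len != len || memcmp(cache->name, name, len) != 0) {
		err = expensive_check(name, len);
		if (err < 0)
			return err;
	}
	err = do_transmit(name, len);
	/* Remember only destinations that were actually sent to. */
	if (err >= 0 && len <= sizeof(cache->name)) {
		memcpy(cache->name, name, len);
		cache->name_len = len;
	}
	return err;
}

int main(void)
{
	/* name_len = UINT_MAX guarantees the first destination never matches. */
	struct used_address cache = { .name_len = UINT_MAX };

	send_one(&cache, "10.0.0.1", 8);   /* checked */
	send_one(&cache, "10.0.0.1", 8);   /* cached, check skipped */
	send_one(&cache, "10.0.0.2", 8);   /* new destination, checked again */
	return 0;
}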
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 9b6a4d1ea8f..f4385e45a5f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -187,6 +187,7 @@ EXPORT_SYMBOL_GPL(xprt_load_transport);
/**
* xprt_reserve_xprt - serialize write access to transports
* @task: task that is requesting access to the transport
+ * @xprt: pointer to the target transport
*
* This prevents mixing the payload of separate requests, and prevents
* transport connects from colliding with writes. No congestion control
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 28d2aa109be..e83e7fee3bc 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3464,7 +3464,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
tmp) {
enum ieee80211_band band = nla_type(attr);
- if (band < 0 || band > IEEE80211_NUM_BANDS) {
+ if (band < 0 || band >= IEEE80211_NUM_BANDS) {
err = -EINVAL;
goto out_free;
}
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 58064d9e565..791ab2e77f3 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -462,8 +462,8 @@ static struct xfrm_algo_desc ealg_list[] = {
.desc = {
.sadb_alg_id = SADB_X_EALG_AESCTR,
.sadb_alg_ivlen = 8,
- .sadb_alg_minbits = 128,
- .sadb_alg_maxbits = 256
+ .sadb_alg_minbits = 160,
+ .sadb_alg_maxbits = 288
}
},
};
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index a38316b2e3f..266a2292451 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -14,7 +14,7 @@
* Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
* <dgoeddel@trustedcs.com>
* Copyright (C) 2006, 2007, 2009 Hewlett-Packard Development Company, L.P.
- * Paul Moore <paul.moore@hp.com>
+ * Paul Moore <paul@paul-moore.com>
* Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
* Yuichi Nakamura <ynakam@hitachisoft.jp>
*
diff --git a/security/selinux/include/netif.h b/security/selinux/include/netif.h
index ce23edd128b..43d507242b4 100644
--- a/security/selinux/include/netif.h
+++ b/security/selinux/include/netif.h
@@ -8,7 +8,7 @@
*
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
- * Paul Moore, <paul.moore@hp.com>
+ * Paul Moore <paul@paul-moore.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index cf2f628e6e2..8c59b8f150e 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -1,7 +1,7 @@
/*
* SELinux interface to the NetLabel subsystem
*
- * Author : Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/security/selinux/include/netnode.h b/security/selinux/include/netnode.h
index 1b94450d11d..df7a5ed6c69 100644
--- a/security/selinux/include/netnode.h
+++ b/security/selinux/include/netnode.h
@@ -6,7 +6,7 @@
* needed to reduce the lookup overhead since most of these queries happen on
* a per-packet basis.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/security/selinux/include/netport.h b/security/selinux/include/netport.h
index 8991752eaf9..4d965b83d73 100644
--- a/security/selinux/include/netport.h
+++ b/security/selinux/include/netport.h
@@ -5,7 +5,7 @@
* mapping is maintained as part of the normal policy but a fast cache is
* needed to reduce the lookup overhead.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index 58cc481c93d..326f22cbe40 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -8,7 +8,7 @@
*
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
- * Paul Moore <paul.moore@hp.com>
+ * Paul Moore <paul@paul-moore.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index c3bf3ed07b0..da4b8b23328 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -4,7 +4,7 @@
* This file provides the necessary glue to tie NetLabel into the SELinux
* subsystem.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index 8b691a86318..3bf46abaa68 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -6,7 +6,7 @@
* needed to reduce the lookup overhead since most of these queries happen on
* a per-packet basis.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
* This code is heavily based on the "netif" concept originally developed by
* James Morris <jmorris@redhat.com>
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index ae76e298de7..0b62bd11246 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -5,7 +5,7 @@
* mapping is maintained as part of the normal policy but a fast cache is
* needed to reduce the lookup overhead.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
* This code is heavily based on the "netif" concept originally developed by
* James Morris <jmorris@redhat.com>
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index de7900ef53d..55d92cbb177 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -2,7 +2,7 @@
*
* Added conditional policy language extensions
*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support for the policy capability bitmap
*
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index d42951fcbe8..30f119b1d1e 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -4,7 +4,7 @@
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
/*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support to import/export the NetLabel category bitmap
*
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index e96174216bc..fbf9c5816c7 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -11,7 +11,7 @@
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
*/
/*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support to import/export the MLS label from NetLabel
*
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
index 037bf9d82d4..e4369e3e636 100644
--- a/security/selinux/ss/mls.h
+++ b/security/selinux/ss/mls.h
@@ -11,7 +11,7 @@
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
*/
/*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support to import/export the MLS label from NetLabel
*
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index d246aca3f4f..2381d0ded22 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -13,7 +13,7 @@
*
* Added conditional policy language extensions
*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support for the policy capability bitmap
*
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 973e00e34fa..f6917bc0aa0 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -13,7 +13,7 @@
*
* Added conditional policy language extensions
*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support for NetLabel
* Added support for the policy capability bitmap
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index f375eb2e195..b9c5e149903 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -9,7 +9,7 @@
*
* Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
- * Paul Moore <paul.moore@hp.com>
+ * Paul Moore <paul@paul-moore.com>
* Copyright (C) 2010 Nokia Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 5fb2e28e796..91cdf9435fe 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -342,7 +342,7 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
kfree(bufs);
return -EFAULT;
}
- bufs[ch] = compat_ptr(ptr);
+ bufs[i] = compat_ptr(ptr);
bufptr++;
}
if (dir == SNDRV_PCM_STREAM_PLAYBACK)
diff --git a/sound/core/rtctimer.c b/sound/core/rtctimer.c
index 0851cd13e30..e85e72baff9 100644
--- a/sound/core/rtctimer.c
+++ b/sound/core/rtctimer.c
@@ -22,7 +22,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
#include <linux/log2.h>
#include <sound/core.h>
#include <sound/timer.h>
diff --git a/sound/pci/asihpi/hpidspcd.c b/sound/pci/asihpi/hpidspcd.c
index 3a7afa31c1d..71d32c868c9 100644
--- a/sound/pci/asihpi/hpidspcd.c
+++ b/sound/pci/asihpi/hpidspcd.c
@@ -43,6 +43,7 @@ short hpi_dsp_code_open(u32 adapter, void *os_data, struct dsp_code *dsp_code,
struct pci_dev *dev = os_data;
struct code_header header;
char fw_name[20];
+ short err_ret = HPI_ERROR_DSP_FILE_NOT_FOUND;
int err;
sprintf(fw_name, "asihpi/dsp%04x.bin", adapter);
@@ -85,8 +86,10 @@ short hpi_dsp_code_open(u32 adapter, void *os_data, struct dsp_code *dsp_code,
HPI_DEBUG_LOG(DEBUG, "dsp code %s opened\n", fw_name);
dsp_code->pvt = kmalloc(sizeof(*dsp_code->pvt), GFP_KERNEL);
- if (!dsp_code->pvt)
- return HPI_ERROR_MEMORY_ALLOC;
+ if (!dsp_code->pvt) {
+ err_ret = HPI_ERROR_MEMORY_ALLOC;
+ goto error2;
+ }
dsp_code->pvt->dev = dev;
dsp_code->pvt->firmware = firmware;
@@ -99,7 +102,7 @@ error2:
release_firmware(firmware);
error1:
dsp_code->block_length = 0;
- return HPI_ERROR_DSP_FILE_NOT_FOUND;
+ return err_ret;
}
/*-------------------------------------------------------------------*/
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 9683f84ecdc..a32502e796d 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -177,16 +177,21 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else {
u16 __user *ptr = NULL;
u32 size = 0;
-
+ u32 adapter_present;
/* -1=no data 0=read from user mem, 1=write to user mem */
int wrflag = -1;
- u32 adapter = hm->h.adapter_index;
- struct hpi_adapter *pa = &adapters[adapter];
+ struct hpi_adapter *pa;
+
+ if (hm->h.adapter_index < HPI_MAX_ADAPTERS) {
+ pa = &adapters[hm->h.adapter_index];
+ adapter_present = pa->type;
+ } else {
+ adapter_present = 0;
+ }
- if ((adapter >= HPI_MAX_ADAPTERS) || (!pa->type)) {
- hpi_init_response(&hr->r0, HPI_OBJ_ADAPTER,
- HPI_ADAPTER_OPEN,
- HPI_ERROR_BAD_ADAPTER_NUMBER);
+ if (!adapter_present) {
+ hpi_init_response(&hr->r0, hm->h.object,
+ hm->h.function, HPI_ERROR_BAD_ADAPTER_NUMBER);
uncopied_bytes =
copy_to_user(puhr, hr, sizeof(hr->h));
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index af130ee0c45..6edc67ced90 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -521,6 +521,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024)
/* revisions >= 230 indicate AES32 card */
+#define HDSPM_MADI_ANCIENT_REV 204
#define HDSPM_MADI_OLD_REV 207
#define HDSPM_MADI_REV 210
#define HDSPM_RAYDAT_REV 211
@@ -1217,6 +1218,22 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
rate = 0;
break;
}
+
+ /* QS and DS rates normally cannot be detected
+ * automatically by the card. The only exception is MADI
+ * in 96k frame mode.
+ *
+ * So if we read SS values (32 .. 48k), check for
+ * user-provided DS/QS bits in the control register
+ * and multiply the base frequency accordingly.
+ */
+ if (rate <= 48000) {
+ if (hdspm->control_register & HDSPM_QuadSpeed)
+ rate *= 4;
+ else if (hdspm->control_register &
+ HDSPM_DoubleSpeed)
+ rate *= 2;
+ }
}
break;
}
@@ -3415,6 +3432,91 @@ static int snd_hdspm_put_qs_wire(struct snd_kcontrol *kcontrol,
return change;
}
+#define HDSPM_MADI_SPEEDMODE(xname, xindex) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .index = xindex, \
+ .info = snd_hdspm_info_madi_speedmode, \
+ .get = snd_hdspm_get_madi_speedmode, \
+ .put = snd_hdspm_put_madi_speedmode \
+}
+
+static int hdspm_madi_speedmode(struct hdspm *hdspm)
+{
+ if (hdspm->control_register & HDSPM_QuadSpeed)
+ return 2;
+ if (hdspm->control_register & HDSPM_DoubleSpeed)
+ return 1;
+ return 0;
+}
+
+static int hdspm_set_madi_speedmode(struct hdspm *hdspm, int mode)
+{
+ hdspm->control_register &= ~(HDSPM_DoubleSpeed | HDSPM_QuadSpeed);
+ switch (mode) {
+ case 0:
+ break;
+ case 1:
+ hdspm->control_register |= HDSPM_DoubleSpeed;
+ break;
+ case 2:
+ hdspm->control_register |= HDSPM_QuadSpeed;
+ break;
+ }
+ hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
+
+ return 0;
+}
+
+static int snd_hdspm_info_madi_speedmode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static char *texts[] = { "Single", "Double", "Quad" };
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = 3;
+
+ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+ uinfo->value.enumerated.item =
+ uinfo->value.enumerated.items - 1;
+ strcpy(uinfo->value.enumerated.name,
+ texts[uinfo->value.enumerated.item]);
+
+ return 0;
+}
+
+static int snd_hdspm_get_madi_speedmode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+
+ spin_lock_irq(&hdspm->lock);
+ ucontrol->value.enumerated.item[0] = hdspm_madi_speedmode(hdspm);
+ spin_unlock_irq(&hdspm->lock);
+ return 0;
+}
+
+static int snd_hdspm_put_madi_speedmode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ int change;
+ int val;
+
+ if (!snd_hdspm_use_is_exclusive(hdspm))
+ return -EBUSY;
+ val = ucontrol->value.integer.value[0];
+ if (val < 0)
+ val = 0;
+ if (val > 2)
+ val = 2;
+ spin_lock_irq(&hdspm->lock);
+ change = val != hdspm_madi_speedmode(hdspm);
+ hdspm_set_madi_speedmode(hdspm, val);
+ spin_unlock_irq(&hdspm->lock);
+ return change;
+}
#define HDSPM_MIXER(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
@@ -4289,7 +4391,8 @@ static struct snd_kcontrol_new snd_hdspm_controls_madi[] = {
HDSPM_TX_64("TX 64 channels mode", 0),
HDSPM_C_TMS("Clear Track Marker", 0),
HDSPM_SAFE_MODE("Safe Mode", 0),
- HDSPM_INPUT_SELECT("Input Select", 0)
+ HDSPM_INPUT_SELECT("Input Select", 0),
+ HDSPM_MADI_SPEEDMODE("MADI Speed Mode", 0)
};
@@ -4302,7 +4405,8 @@ static struct snd_kcontrol_new snd_hdspm_controls_madiface[] = {
HDSPM_SYNC_CHECK("MADI SyncCheck", 0),
HDSPM_TX_64("TX 64 channels mode", 0),
HDSPM_C_TMS("Clear Track Marker", 0),
- HDSPM_SAFE_MODE("Safe Mode", 0)
+ HDSPM_SAFE_MODE("Safe Mode", 0),
+ HDSPM_MADI_SPEEDMODE("MADI Speed Mode", 0)
};
static struct snd_kcontrol_new snd_hdspm_controls_aio[] = {
@@ -6381,6 +6485,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
switch (hdspm->firmware_rev) {
case HDSPM_MADI_REV:
case HDSPM_MADI_OLD_REV:
+ case HDSPM_MADI_ANCIENT_REV:
hdspm->io_type = MADI;
hdspm->card_name = "RME MADI";
hdspm->midiPorts = 3;
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 34aa972669e..3de99af8cb8 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -290,6 +290,7 @@ static void txx9aclc_pcm_free_dma_buffers(struct snd_pcm *pcm)
static int txx9aclc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_card *card = rtd->card->snd_card;
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
struct platform_device *pdev = to_platform_device(dai->platform->dev);
diff --git a/tools/power/cpupower/.gitignore b/tools/power/cpupower/.gitignore
new file mode 100644
index 00000000000..8a83dd2ffc1
--- /dev/null
+++ b/tools/power/cpupower/.gitignore
@@ -0,0 +1,22 @@
+.libs
+libcpupower.so
+libcpupower.so.0
+libcpupower.so.0.0.0
+build/ccdv
+cpufreq-info
+cpufreq-set
+cpufreq-aperf
+lib/.libs
+lib/cpufreq.lo
+lib/cpufreq.o
+lib/proc.lo
+lib/proc.o
+lib/sysfs.lo
+lib/sysfs.o
+po/cpupowerutils.pot
+po/*.gmo
+utils/cpufreq-info.o
+utils/cpufreq-set.o
+utils/cpufreq-aperf.o
+cpupower
+bench/cpufreq-bench
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
new file mode 100644
index 00000000000..94c2cf0a98b
--- /dev/null
+++ b/tools/power/cpupower/Makefile
@@ -0,0 +1,279 @@
+# Makefile for cpupower
+#
+# Copyright (C) 2005,2006 Dominik Brodowski <linux@dominikbrodowski.net>
+#
+# Based largely on the Makefile for udev by:
+#
+# Copyright (C) 2003,2004 Greg Kroah-Hartman <greg@kroah.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+# --- CONFIGURATION BEGIN ---
+
+# Set the following to `true' to make an unstripped, unoptimized
+# binary. Leave this set to `false' for production use.
+DEBUG ?= false
+
+# make the build silent. Set this to something else to make it noisy again.
+V ?= false
+
+# Internationalization support (output in different languages).
+# Requires gettext.
+NLS ?= true
+
+# Set the following to 'true' to build/install the
+# cpufreq-bench benchmarking tool
+CPUFRQ_BENCH ?= true
+
+# Prefix to the directories we're installing to
+DESTDIR ?=
+
+# --- CONFIGURATION END ---
+
+
+
+# Package-related definitions. Distributions can modify the version
+# and _should_ modify the PACKAGE_BUGREPORT definition
+
+VERSION= $(shell ./utils/version-gen.sh)
+LIB_MAJ= 0.0.0
+LIB_MIN= 0
+
+PACKAGE = cpupower
+PACKAGE_BUGREPORT = cpufreq@vger.kernel.org
+LANGUAGES = de fr it cs pt
+
+
+# Directory definitions. These are defaults and most probably
+# do not need to be changed. Please note that DESTDIR is
+# prepended to all of them.
+
+bindir ?= /usr/bin
+sbindir ?= /usr/sbin
+mandir ?= /usr/man
+includedir ?= /usr/include
+libdir ?= /usr/lib
+localedir ?= /usr/share/locale
+docdir ?= /usr/share/doc/packages/cpupower
+confdir ?= /etc/
+
+# Toolchain: what tools do we use, and what options do they need:
+
+CP = cp -fpR
+INSTALL = /usr/bin/install -c
+INSTALL_PROGRAM = ${INSTALL}
+INSTALL_DATA = ${INSTALL} -m 644
+INSTALL_SCRIPT = ${INSTALL_PROGRAM}
+
+# If you are running a cross compiler, you may want to set this
+# to something more interesting, like "arm-linux-". If you want
+# to compile vs uClibc, that can be done here as well.
+CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
+CC = $(CROSS)gcc
+LD = $(CROSS)gcc
+AR = $(CROSS)ar
+STRIP = $(CROSS)strip
+RANLIB = $(CROSS)ranlib
+HOSTCC = gcc
+
+
+# Now we set up the build system
+#
+
+# set up PWD so that older versions of make will work with our build.
+PWD = $(shell pwd)
+
+GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo po/$$HLANG.gmo; done;}
+
+export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS
+
+# check if compiler option is supported
+cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+
+# use '-Os' optimization if available, else use -O2
+OPTIMIZATION := $(call cc-supports,-Os,-O2)
+
+WARNINGS := -Wall -Wchar-subscripts -Wpointer-arith -Wsign-compare
+WARNINGS += $(call cc-supports,-Wno-pointer-sign)
+WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
+WARNINGS += -Wshadow
+
+CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \
+ -DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE
+
+UTIL_OBJS = utils/helpers/amd.o utils/helpers/topology.o utils/helpers/msr.o \
+ utils/helpers/sysfs.o utils/helpers/misc.o utils/helpers/cpuid.o \
+ utils/helpers/pci.o utils/helpers/bitmask.o \
+ utils/idle_monitor/nhm_idle.o utils/idle_monitor/snb_idle.o \
+ utils/idle_monitor/amd_fam14h_idle.o utils/idle_monitor/cpuidle_sysfs.o \
+ utils/idle_monitor/mperf_monitor.o utils/idle_monitor/cpupower-monitor.o \
+ utils/cpupower.o utils/cpufreq-info.o utils/cpufreq-set.o \
+ utils/cpupower-set.o utils/cpupower-info.o utils/cpuidle-info.o
+
+UTIL_HEADERS = utils/helpers/helpers.h utils/idle_monitor/cpupower-monitor.h \
+ utils/helpers/bitmask.h \
+ utils/idle_monitor/idle_monitors.h utils/idle_monitor/idle_monitors.def
+
+UTIL_SRC := $(UTIL_OBJS:.o=.c)
+
+LIB_HEADERS = lib/cpufreq.h lib/sysfs.h
+LIB_SRC = lib/cpufreq.c lib/sysfs.c
+LIB_OBJS = lib/cpufreq.o lib/sysfs.o
+
+CFLAGS += -pipe
+
+ifeq ($(strip $(NLS)),true)
+ INSTALL_NLS += install-gmo
+ COMPILE_NLS += create-gmo
+endif
+
+ifeq ($(strip $(CPUFRQ_BENCH)),true)
+ INSTALL_BENCH += install-bench
+ COMPILE_BENCH += compile-bench
+endif
+
+CFLAGS += $(WARNINGS)
+
+ifeq ($(strip $(V)),false)
+ QUIET=@
+ ECHO=@echo
+else
+ QUIET=
+ ECHO=@\#
+endif
+export QUIET ECHO
+
+# if DEBUG is enabled, then we do not strip or optimize
+ifeq ($(strip $(DEBUG)),true)
+ CFLAGS += -O1 -g -DDEBUG
+ STRIPCMD = /bin/true -Since_we_are_debugging
+else
+ CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
+ STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
+endif
+
+
+# the actual make rules
+
+all: libcpupower cpupower $(COMPILE_NLS) $(COMPILE_BENCH)
+
+lib/%.o: $(LIB_SRC) $(LIB_HEADERS)
+ $(ECHO) " CC " $@
+ $(QUIET) $(CC) $(CFLAGS) -fPIC -o $@ -c lib/$*.c
+
+libcpupower.so.$(LIB_MAJ): $(LIB_OBJS)
+ $(ECHO) " LD " $@
+ $(QUIET) $(CC) -shared $(CFLAGS) $(LDFLAGS) -o $@ \
+ -Wl,-soname,libcpupower.so.$(LIB_MIN) $(LIB_OBJS)
+ @ln -sf $@ libcpupower.so
+ @ln -sf $@ libcpupower.so.$(LIB_MIN)
+
+libcpupower: libcpupower.so.$(LIB_MAJ)
+
+# Let all .o files depend on their .c files and all headers.
+# Might be worth putting this into utils/Makefile at some point.
+$(UTIL_OBJS): $(UTIL_HEADERS)
+
+.c.o:
+ $(ECHO) " CC " $@
+ $(QUIET) $(CC) $(CFLAGS) -I./lib -I ./utils -o $@ -c $*.c
+
+cpupower: $(UTIL_OBJS) libcpupower.so.$(LIB_MAJ)
+ $(ECHO) " CC " $@
+ $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) -lcpupower -lrt -lpci -L. -o $@ $(UTIL_OBJS)
+ $(QUIET) $(STRIPCMD) $@
+
+po/$(PACKAGE).pot: $(UTIL_SRC)
+ $(ECHO) " GETTEXT " $@
+ $(QUIET) xgettext --default-domain=$(PACKAGE) --add-comments \
+ --keyword=_ --keyword=N_ $(UTIL_SRC) && \
+ test -f $(PACKAGE).po && \
+ mv -f $(PACKAGE).po po/$(PACKAGE).pot
+
+po/%.gmo: po/%.po
+ $(ECHO) " MSGFMT " $@
+ $(QUIET) msgfmt -o $@ po/$*.po
+
+create-gmo: ${GMO_FILES}
+
+update-po: po/$(PACKAGE).pot
+ $(ECHO) " MSGMRG " $@
+ $(QUIET) @for HLANG in $(LANGUAGES); do \
+ echo -n "Updating $$HLANG "; \
+ if msgmerge po/$$HLANG.po po/$(PACKAGE).pot -o \
+ po/$$HLANG.new.po; then \
+ mv -f po/$$HLANG.new.po po/$$HLANG.po; \
+ else \
+ echo "msgmerge for $$HLANG failed!"; \
+ rm -f po/$$HLANG.new.po; \
+ fi; \
+ done;
+
+compile-bench: libcpupower.so.$(LIB_MAJ)
+ @V=$(V) confdir=$(confdir) $(MAKE) -C bench
+
+clean:
+ -find . \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \
+ | xargs rm -f
+ -rm -f $(UTIL_BINS)
+ -rm -f $(IDLE_OBJS)
+ -rm -f cpupower
+ -rm -f libcpupower.so*
+ -rm -rf po/*.gmo po/*.pot
+ $(MAKE) -C bench clean
+
+
+install-lib:
+ $(INSTALL) -d $(DESTDIR)${libdir}
+ $(CP) libcpupower.so* $(DESTDIR)${libdir}/
+ $(INSTALL) -d $(DESTDIR)${includedir}
+ $(INSTALL_DATA) lib/cpufreq.h $(DESTDIR)${includedir}/cpufreq.h
+
+install-tools:
+ $(INSTALL) -d $(DESTDIR)${bindir}
+ $(INSTALL_PROGRAM) cpupower $(DESTDIR)${bindir}
+
+install-man:
+ $(INSTALL_DATA) -D man/cpupower.1 $(DESTDIR)${mandir}/man1/cpupower.1
+ $(INSTALL_DATA) -D man/cpupower-frequency-set.1 $(DESTDIR)${mandir}/man1/cpupower-frequency-set.1
+ $(INSTALL_DATA) -D man/cpupower-frequency-info.1 $(DESTDIR)${mandir}/man1/cpupower-frequency-info.1
+ $(INSTALL_DATA) -D man/cpupower-set.1 $(DESTDIR)${mandir}/man1/cpupower-set.1
+ $(INSTALL_DATA) -D man/cpupower-info.1 $(DESTDIR)${mandir}/man1/cpupower-info.1
+ $(INSTALL_DATA) -D man/cpupower-monitor.1 $(DESTDIR)${mandir}/man1/cpupower-monitor.1
+
+install-gmo:
+ $(INSTALL) -d $(DESTDIR)${localedir}
+ for HLANG in $(LANGUAGES); do \
+ echo '$(INSTALL_DATA) -D po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo'; \
+ $(INSTALL_DATA) -D po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
+ done;
+
+install-bench:
+ @#DESTDIR must be set from outside to survive
+ @sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench install
+
+install: all install-lib install-tools install-man $(INSTALL_NLS) $(INSTALL_BENCH)
+
+uninstall:
+ - rm -f $(DESTDIR)${libdir}/libcpupower.*
+ - rm -f $(DESTDIR)${includedir}/cpufreq.h
+ - rm -f $(DESTDIR)${bindir}/utils/cpupower
+ - rm -f $(DESTDIR)${mandir}/man1/cpufreq-set.1
+ - rm -f $(DESTDIR)${mandir}/man1/cpufreq-info.1
+ - for HLANG in $(LANGUAGES); do \
+ rm -f $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
+ done;
+
+.PHONY: all utils libcpupower update-po create-gmo install-lib install-tools install-man install-gmo install uninstall clean
diff --git a/tools/power/cpupower/README b/tools/power/cpupower/README
new file mode 100644
index 00000000000..fd9d4c0d668
--- /dev/null
+++ b/tools/power/cpupower/README
@@ -0,0 +1,49 @@
+The cpufrequtils package (homepage:
+http://www.kernel.org/pub/linux/utils/kernel/cpufreq/cpufrequtils.html )
+consists of the following elements:
+
+requirements
+------------
+
+On x86 pciutils is needed at runtime (-lpci).
+For compilation pciutils-devel (pci/pci.h) and a gcc version
+providing cpuid.h is needed.
+Neither is explicitly checked for (yet).
+
+
+libcpufreq
+----------
+
+"libcpufreq" is a library which offers a unified access method for userspace
+tools and programs to the cpufreq core and drivers in the Linux kernel. This
+allows for code reduction in userspace tools, a clean implementation of
+the interaction to the cpufreq core, and support for both the sysfs and proc
+interfaces [depending on configuration, see below].
+
+
+compilation and installation
+----------------------------
+
+make
+su
+make install
+
+should suffice on most systems. By default it builds libcpufreq and the
+cpufreq-set and cpufreq-info tools and installs them in /usr/lib and
+/usr/bin, respectively. If you want to set up the paths differently and/or
+want to configure the package to your specific needs, you need to open
+"Makefile" with an editor of your choice and edit the block marked
+CONFIGURATION.
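+
+Alternatively, installation variables can be overridden on the make
+command line; the install targets honor DESTDIR, so a staged install into
+a scratch directory (the path below is only an example) may look like:
+
+make
+su
+make DESTDIR=/tmp/cpupower-staging install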
+
+
+THANKS
+------
+Many thanks to Mattia Dongili who wrote the autotoolization and
+libtoolization, the manpages and the Italian language file for cpufrequtils;
+to Dave Jones for his feedback and his dump_psb tool; to Bruno Ducrot for his
+powernow-k8-decode and intel_gsic tools as well as the French language file;
+and to various others commenting on the previous (pre-)releases of
+cpufrequtils.
+
+
+ Dominik Brodowski
diff --git a/tools/power/cpupower/ToDo b/tools/power/cpupower/ToDo
new file mode 100644
index 00000000000..874b78b586e
--- /dev/null
+++ b/tools/power/cpupower/ToDo
@@ -0,0 +1,11 @@
+ToDos sorted by priority:
+
+- Use bitmask functions to parse CPU topology more robustly
+ (current implementation has issues on AMD)
+- Try to read out boost states and frequencies on Intel
+- Adjust README
+- Reportedly the power consumption of RAM can be read from HW on
+ Intel SandyBridge -> another monitor?
+- Add another c1e debug idle monitor
+ -> Is by design racy with BIOS, but could be added
+ with a --force option and some "be careful" messages
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
new file mode 100644
index 00000000000..2b67606fc3e
--- /dev/null
+++ b/tools/power/cpupower/bench/Makefile
@@ -0,0 +1,29 @@
+LIBS = -L../ -lm -lcpupower
+
+OBJS = main.o parse.o system.o benchmark.o
+CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\"
+
+%.o : %.c
+ $(ECHO) " CC " $@
+ $(QUIET) $(CC) -c $(CFLAGS) $< -o $@
+
+cpufreq-bench: $(OBJS)
+ $(ECHO) " CC " $@
+ $(QUIET) $(CC) -o $@ $(CFLAGS) $(OBJS) $(LIBS)
+
+all: cpufreq-bench
+
+install:
+ mkdir -p $(DESTDIR)/$(sbindir)
+ mkdir -p $(DESTDIR)/$(bindir)
+ mkdir -p $(DESTDIR)/$(docdir)
+ mkdir -p $(DESTDIR)/$(confdir)
+ install -m 755 cpufreq-bench $(DESTDIR)/$(sbindir)/cpufreq-bench
+ install -m 755 cpufreq-bench_plot.sh $(DESTDIR)/$(bindir)/cpufreq-bench_plot.sh
+ install -m 644 README-BENCH $(DESTDIR)/$(docdir)/README-BENCH
+ install -m 755 cpufreq-bench_script.sh $(DESTDIR)/$(docdir)/cpufreq-bench_script.sh
+ install -m 644 example.cfg $(DESTDIR)/$(confdir)/cpufreq-bench.conf
+
+clean:
+ rm -f *.o
+ rm -f cpufreq-bench
diff --git a/tools/power/cpupower/bench/README-BENCH b/tools/power/cpupower/bench/README-BENCH
new file mode 100644
index 00000000000..8093ec73817
--- /dev/null
+++ b/tools/power/cpupower/bench/README-BENCH
@@ -0,0 +1,124 @@
+This is cpufreq-bench, a microbenchmark for the cpufreq framework.
+
+Purpose
+=======
+
+What is this benchmark for:
+ - Identify worst case performance loss when doing dynamic frequency
+ scaling using Linux kernel governors
+ - Identify average reaction time of a governor to CPU load changes
+ - (Stress) Testing whether a cpufreq low level driver or governor works
+ as expected
+ - Identify cpufreq related performance regressions between kernels
+ - Possibly real-time priority testing? -> what happens if there are
+ processes with a higher prio than the governor's kernel thread
+ - ...
+
+What this benchmark does *not* cover:
+ - Power-saving related regressions (in fact, the better the performance
+ throughput is, the worse the power savings will be, but the former should
+ usually count for more...)
+ - Real-world workloads
+
+
+Description
+===========
+
+cpufreq-bench helps to test the behavior of a given cpufreq governor.
+For that purpose, it compares the performance governor to a configured
+powersave governor (the governor under test).
+
+
+How it works
+============
+You can specify load (100% CPU load) and sleep (0% CPU load) times in us, which
+will be run X times in a row (cycles):
+
+ sleep=25000
+ load=25000
+ cycles=20
+
+This part of the configuration file will create 25ms load/sleep cycles,
+repeated 20 times.
+
+Adding this:
+ sleep_step=25000
+ load_step=25000
+ rounds=5
+Will increase load and sleep time by 25ms 5 times.
+Together you get the following test (a combined config file is shown below):
+25ms load/sleep time repeated 20 times (cycles).
+50ms load/sleep time repeated 20 times (cycles).
+..
+100ms load/sleep time repeated 20 times (cycles).
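+
+Putting both snippets together, such a test can be described by a single
+config file like the one below (values taken from the snippets above; see
+the shipped example.cfg for a complete file including governor and output
+settings):
+
+ sleep=25000
+ load=25000
+ cycles=20
+ sleep_step=25000
+ load_step=25000
+ rounds=5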
+
+First, the benchmark calibrates how long a specific CPU-intensive
+calculation, run in a loop under the performance governor, takes on this
+machine.
+Then the above test runs are processed using both the performance governor
+and the governor under test. The time the calculation actually needed
+with the dynamic frequency scaling governor is compared with the time needed
+at full performance, which gives you the overall performance loss.
+
+
+Example of expected results with ondemand governor:
+
+This shows the expected results of the first two test rounds from the
+above config; there you have:
+
+100% CPU load (load) | 0 % CPU load (sleep) | round
+ 25 ms | 25 ms | 1
+ 50 ms | 50 ms | 2
+
+For example, if the ondemand governor is configured with a 50ms
+sampling rate, you get:
+
+In round 1, ondemand should see a rather static 50% load and probably
+won't ever switch up (as long as up_threshold is above that).
+
+In round 2, if the ondemand sampling times exactly match the load/sleep
+trigger of the cpufreq-bench, you will see no performance loss (compare with
+the possible ondemand sample kick-ins (1) below):
+
+But if ondemand always kicks in in the middle of the load/sleep cycles, it
+will always see 50% load and you get the worst performance impact, never
+switching up (compare with the possible ondemand sample kick-ins (2) below):
+
+ 50 50 50 50ms ->time
+load -----| |-----| |-----| |-----|
+ | | | | | | |
+sleep |-----| |-----| |-----| |----
+ |-----|-----|-----|-----|-----|-----|-----|---- ondemand sampling (1)
+ 100 0 100 0 100 0 100 load seen by ondemand(%)
+ |-----|-----|-----|-----|-----|-----|-----|-- ondemand sampling (2)
+ 50 50 50 50 50 50 50 load seen by ondemand(%)
+
+You can easily test all kinds of load/sleep times and check whether your
+governor on average behaves as expected.
+
+
+ToDo
+====
+
+Provide a gnuplot utility script for easy generation of plots to present
+the outcome nicely.
+
+
+cpufreq-bench Command Usage
+===========================
+-l, --load=<long int> initial load time in us
+-s, --sleep=<long int> initial sleep time in us
+-x, --load-step=<long int> time to be added to load time, in us
+-y, --sleep-step=<long int> time to be added to sleep time, in us
+-c, --cpu=<unsigned int> CPU Number to use, starting at 0
+-p, --prio=<priority> scheduler priority, HIGH, LOW or DEFAULT
+-g, --governor=<governor> cpufreq governor to test
+-n, --cycles=<int> load/sleep cycles to get an average value to compare
+-r, --rounds=<int> load/sleep rounds
+-f, --file=<configfile> config file to use
+-o, --output=<dir> output dir, must exist
+-v, --verbose verbose output on/off
+
+Due to the high priority, the system may not be responsive for some time.
+After the benchmark, the logfile is saved in OUTPUTDIR/benchmark_TIMESTAMP.log
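+
+A typical invocation matching the configuration discussed above might look
+like this (the output directory is only an example):
+
+ cpufreq-bench -l 25000 -s 25000 -x 25000 -y 25000 \
+               -n 20 -r 5 -c 0 -g ondemand -o /var/log/cpufreq-bench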
+
diff --git a/tools/power/cpupower/bench/benchmark.c b/tools/power/cpupower/bench/benchmark.c
new file mode 100644
index 00000000000..81b1c48607d
--- /dev/null
+++ b/tools/power/cpupower/bench/benchmark.c
@@ -0,0 +1,194 @@
+/* cpufreq-bench CPUFreq microbenchmark
+ *
+ * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <math.h>
+
+#include "config.h"
+#include "system.h"
+#include "benchmark.h"
+
+/* Print out progress if we log into a file */
+#define show_progress(total_time, progress_time) \
+if (config->output != stdout) { \
+ fprintf(stdout, "Progress: %02lu %%\r", \
+ (progress_time * 100) / total_time); \
+ fflush(stdout); \
+}
+
+/**
+ * compute how many rounds of calculation we should do
+ * to get the given load time
+ *
+ * @param load aimed load time in µs
+ *
+ * @retval rounds of calculation
+ **/
+
+unsigned int calculate_timespace(long load, struct config *config)
+{
+ int i;
+ long long now, then;
+ unsigned int estimated = GAUGECOUNT;
+ unsigned int rounds = 0;
+ unsigned int timed = 0;
+
+ if (config->verbose)
+ printf("calibrating load of %lius, please wait...\n", load);
+
+ /* get the initial calculation time for a specific number of rounds */
+ now = get_time();
+ ROUNDS(estimated);
+ then = get_time();
+
+ timed = (unsigned int)(then - now);
+
+ /* approximation of the wanted load time by comparing with the
+ * initial calculation time */
+ for (i = 0; i < 4; i++) {
+ rounds = (unsigned int)(load * estimated / timed);
+ dprintf("calibrating with %u rounds\n", rounds);
+ now = get_time();
+ ROUNDS(rounds);
+ then = get_time();
+
+ timed = (unsigned int)(then - now);
+ estimated = rounds;
+ }
+ if (config->verbose)
+ printf("calibration done\n");
+
+ return estimated;
+}
+
+/**
+ * benchmark
+ * generates a specific sleep and load time with the performance
+ * governor and compares the used time for same calculations done
+ * with the configured powersave governor
+ *
+ * @param config config values for the benchmark
+ *
+ **/
+
+void start_benchmark(struct config *config)
+{
+ unsigned int _round, cycle;
+ long long now, then;
+ long sleep_time = 0, load_time = 0;
+ long performance_time = 0, powersave_time = 0;
+ unsigned int calculations;
+ unsigned long total_time = 0, progress_time = 0;
+
+ sleep_time = config->sleep;
+ load_time = config->load;
+
+ /* For the progress bar */
+ for (_round = 1; _round <= config->rounds; _round++)
+ total_time += _round * (config->sleep + config->load);
+ total_time *= 2; /* powersave and performance cycles */
+
+ for (_round = 0; _round < config->rounds; _round++) {
+ performance_time = 0LL;
+ powersave_time = 0LL;
+
+ show_progress(total_time, progress_time);
+
+ /* set the cpufreq governor to "performance" which disables
+ * P-State switching. */
+ if (set_cpufreq_governor("performance", config->cpu) != 0)
+ return;
+
+ /* calibrate the calculation time. the resulting calculation
+ * _rounds should produce a load which matches the configured
+ * load time */
+ calculations = calculate_timespace(load_time, config);
+
+ if (config->verbose)
+ printf("_round %i: doing %u cycles with %u calculations"
+ " for %lius\n", _round + 1, config->cycles,
+ calculations, load_time);
+
+ fprintf(config->output, "%u %li %li ",
+ _round, load_time, sleep_time);
+
+ if (config->verbose)
+ printf("avarage: %lius, rps:%li\n",
+ load_time / calculations,
+ 1000000 * calculations / load_time);
+
+ /* do some sleep/load cycles with the performance governor */
+ for (cycle = 0; cycle < config->cycles; cycle++) {
+ now = get_time();
+ usleep(sleep_time);
+ ROUNDS(calculations);
+ then = get_time();
+ performance_time += then - now - sleep_time;
+ if (config->verbose)
+ printf("performance cycle took %lius, "
+ "sleep: %lius, "
+ "load: %lius, rounds: %u\n",
+ (long)(then - now), sleep_time,
+ load_time, calculations);
+ }
+ fprintf(config->output, "%li ",
+ performance_time / config->cycles);
+
+ progress_time += sleep_time + load_time;
+ show_progress(total_time, progress_time);
+
+ /* set the powersave governor which activates P-State switching
+ * again */
+ if (set_cpufreq_governor(config->governor, config->cpu) != 0)
+ return;
+
+ /* again, do some sleep/load cycles with the
+ * powersave governor */
+ for (cycle = 0; cycle < config->cycles; cycle++) {
+ now = get_time();
+ usleep(sleep_time);
+ ROUNDS(calculations);
+ then = get_time();
+ powersave_time += then - now - sleep_time;
+ if (config->verbose)
+ printf("powersave cycle took %lius, "
+ "sleep: %lius, "
+ "load: %lius, rounds: %u\n",
+ (long)(then - now), sleep_time,
+ load_time, calculations);
+ }
+
+ progress_time += sleep_time + load_time;
+
+ /* compare the average sleep/load cycles */
+ fprintf(config->output, "%li ",
+ powersave_time / config->cycles);
+ fprintf(config->output, "%.3f\n",
+ performance_time * 100.0 / powersave_time);
+ fflush(config->output);
+
+ if (config->verbose)
+ printf("performance is at %.2f%%\n",
+ performance_time * 100.0 / powersave_time);
+
+ sleep_time += config->sleep_step;
+ load_time += config->load_step;
+ }
+}
diff --git a/tools/power/cpupower/bench/benchmark.h b/tools/power/cpupower/bench/benchmark.h
new file mode 100644
index 00000000000..51d7f50ac2b
--- /dev/null
+++ b/tools/power/cpupower/bench/benchmark.h
@@ -0,0 +1,29 @@
+/* cpufreq-bench CPUFreq microbenchmark
+ *
+ * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* load loop, this should take about 1 to 2ms to complete */
+#define ROUNDS(x) {unsigned int rcnt; \
+ for (rcnt = 0; rcnt < x*1000; rcnt++) { \
+ (void)(((int)(pow(rcnt, rcnt) * \
+ sqrt(rcnt*7230970)) ^ 7230716) ^ \
+ (int)atan2(rcnt, rcnt)); \
+ } } \
+
+
+void start_benchmark(struct config *config);
diff --git a/tools/power/cpupower/bench/config.h b/tools/power/cpupower/bench/config.h
new file mode 100644
index 00000000000..ee6f258e533
--- /dev/null
+++ b/tools/power/cpupower/bench/config.h
@@ -0,0 +1,36 @@
+/* cpufreq-bench CPUFreq microbenchmark
+ *
+ * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* initial loop count for the load calibration */
+#define GAUGECOUNT 1500
+
+/* default scheduling policy SCHED_OTHER */
+#define SCHEDULER SCHED_OTHER
+
+#define PRIORITY_DEFAULT 0
+#define PRIORITY_HIGH sched_get_priority_max(SCHEDULER)
+#define PRIORITY_LOW sched_get_priority_min(SCHEDULER)
+
+/* enable further debug messages */
+#ifdef DEBUG
+#define dprintf printf
+#else
+#define dprintf(...) do { } while (0)
+#endif
+
diff --git a/tools/power/cpupower/bench/cpufreq-bench_plot.sh b/tools/power/cpupower/bench/cpufreq-bench_plot.sh
new file mode 100644
index 00000000000..410021a12f4
--- /dev/null
+++ b/tools/power/cpupower/bench/cpufreq-bench_plot.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# Author/Copyright(c): 2009, Thomas Renninger <trenn@suse.de>, Novell Inc.
+
+# Helper script to easily create nice plots of your cpufreq-bench results
+
+dir=`mktemp -d`
+output_file="cpufreq-bench.png"
+global_title="cpufreq-bench plot"
+picture_type="jpeg"
+file[0]=""
+
+function usage()
+{
+ echo "cpufreq-bench_plot.sh [OPTIONS] logfile [measure_title] [logfile [measure_title]] ...]"
+ echo
+ echo "Options"
+ echo " -o output_file"
+ echo " -t global_title"
+ echo " -p picture_type [jpeg|gif|png|postscript|...]"
+ exit 1
+}
+
+if [ $# -eq 0 ];then
+ echo "No benchmark results file provided"
+ echo
+ usage
+fi
+
+while getopts o:t:p: name ; do
+ case $name in
+ o)
+ output_file="$OPTARG".$picture_type
+ ;;
+ t)
+ global_title="$OPTARG"
+ ;;
+ p)
+ picture_type="$OPTARG"
+ ;;
+ ?)
+ usage
+ ;;
+ esac
+done
+shift $(($OPTIND -1))
+
+plots=0
+while [ "$1" ];do
+ if [ ! -f "$1" ];then
+ echo "File $1 does not exist"
+ usage
+ fi
+ file[$plots]="$1"
+ title[$plots]="$2"
+ # echo "File: ${file[$plots]} - ${title[plots]}"
+ shift;shift
+ plots=$((plots + 1))
+done
+
+echo "set terminal $picture_type" >> $dir/plot_script.gpl
+echo "set output \"$output_file\"" >> $dir/plot_script.gpl
+echo "set title \"$global_title\"" >> $dir/plot_script.gpl
+echo "set xlabel \"sleep/load time\"" >> $dir/plot_script.gpl
+echo "set ylabel \"Performance (%)\"" >> $dir/plot_script.gpl
+
+for((plot=0;plot<$plots;plot++));do
+
+ # Sanity check
+ ###### I am too dumb to get this redirected to stderr/stdout in one awk call... #####
+ cat ${file[$plot]} |grep -v "^#" |awk '{if ($2 != $3) printf("Error in measure %d:Load time %s does not equal sleep time %s, plot will not be correct\n", $1, $2, $3); ERR=1}'
+ ###### I am too dumb to get this redirected in one awk call... #####
+
+ # Parse out load time (which must be equal to sleep time for a plot), divide it by 1000
+ # to get ms and parse out the performance in percentage and write it to a temp file for plotting
+ cat ${file[$plot]} |grep -v "^#" |awk '{printf "%lu %.1f\n",$2/1000, $6}' >$dir/data_$plot
+
+ if [ $plot -eq 0 ];then
+ echo -n "plot " >> $dir/plot_script.gpl
+ fi
+ echo -n "\"$dir/data_$plot\" title \"${title[$plot]}\" with lines" >> $dir/plot_script.gpl
+ if [ $(($plot + 1)) -ne $plots ];then
+ echo -n ", " >> $dir/plot_script.gpl
+ fi
+done
+echo >> $dir/plot_script.gpl
+
+gnuplot $dir/plot_script.gpl
+rm -r $dir \ No newline at end of file
diff --git a/tools/power/cpupower/bench/cpufreq-bench_script.sh b/tools/power/cpupower/bench/cpufreq-bench_script.sh
new file mode 100644
index 00000000000..de20d2a0687
--- /dev/null
+++ b/tools/power/cpupower/bench/cpufreq-bench_script.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# Author/Copyright(c): 2009, Thomas Renninger <trenn@suse.de>, Novell Inc.
+
+# Ondemand up_threshold and sampling rate test script for cpufreq-bench
+# microbenchmark.
+# Modify the general variables at the top or extend or copy out parts
+# if you want to test other things
+#
+
+# Default with latest kernels is 95, before micro account patches
+# it was 80, cmp. with git commit 808009131046b62ac434dbc796
+UP_THRESHOLD="60 80 95"
+# Depending on the kernel and the HW, the sampling rate could be restricted
+# and cannot be set that low...
+# E.g. before git commit cef9615a853ebc4972084f7 one could only set
+# min sampling rate of 80000 if CONFIG_HZ=250
+SAMPLING_RATE="20000 80000"
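+# For example, to compare only the kernel default up_threshold at the two
+# sampling rates above, the variables could be reduced to (illustrative
+# values; any space-separated list works):
+# UP_THRESHOLD="95"
+# SAMPLING_RATE="20000 80000"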
+
+function measure()
+{
+ local -i up_threshold_set
+ local -i sampling_rate_set
+
+ for up_threshold in $UP_THRESHOLD;do
+ for sampling_rate in $SAMPLING_RATE;do
+ # Set values in sysfs
+ echo $up_threshold >/sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
+ echo $sampling_rate >/sys/devices/system/cpu/cpu0/cpufreq/ondemand/sampling_rate
+ up_threshold_set=$(cat /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold)
+ sampling_rate_set=$(cat /sys/devices/system/cpu/cpu0/cpufreq/ondemand/sampling_rate)
+
+ # Verify set values in sysfs
+ if [ ${up_threshold_set} -eq ${up_threshold} ];then
+ echo "up_threshold: $up_threshold, set in sysfs: ${up_threshold_set}"
+ else
+ echo "WARNING: Tried to set up_threshold: $up_threshold, set in sysfs: ${up_threshold_set}"
+ fi
+ if [ ${sampling_rate_set} -eq ${sampling_rate} ];then
+ echo "sampling_rate: $sampling_rate, set in sysfs: ${sampling_rate_set}"
+ else
+ echo "WARNING: Tried to set sampling_rate: $sampling_rate, set in sysfs: ${sampling_rate_set}"
+ fi
+
+ # Benchmark
+ cpufreq-bench -o /var/log/cpufreq-bench/up_threshold_${up_threshold}_sampling_rate_${sampling_rate}
+ done
+ done
+}
+
+function create_plots()
+{
+ local command
+
+ for up_threshold in $UP_THRESHOLD;do
+ command="cpufreq-bench_plot.sh -o \"sampling_rate_${SAMPLING_RATE}_up_threshold_${up_threshold}\" -t \"Ondemand sampling_rate: ${SAMPLING_RATE} comparison - Up_threshold: $up_threshold %\""
+ for sampling_rate in $SAMPLING_RATE;do
+ command="${command} /var/log/cpufreq-bench/up_threshold_${up_threshold}_sampling_rate_${sampling_rate}/* \"sampling_rate = $sampling_rate\""
+ done
+ echo $command
+ eval "$command"
+ echo
+ done
+
+ for sampling_rate in $SAMPLING_RATE;do
+ command="cpufreq-bench_plot.sh -o \"up_threshold_${UP_THRESHOLD}_sampling_rate_${sampling_rate}\" -t \"Ondemand up_threshold: ${UP_THRESHOLD} % comparison - sampling_rate: $sampling_rate\""
+ for up_threshold in $UP_THRESHOLD;do
+ command="${command} /var/log/cpufreq-bench/up_threshold_${up_threshold}_sampling_rate_${sampling_rate}/* \"up_threshold = $up_threshold\""
+ done
+ echo $command
+ eval "$command"
+ echo
+ done
+
+ command="cpufreq-bench_plot.sh -o \"up_threshold_${UP_THRESHOLD}_sampling_rate_${SAMPLING_RATE}\" -t \"Ondemand up_threshold: ${UP_THRESHOLD} and sampling_rate ${SAMPLING_RATE} comparison\""
+ for sampling_rate in $SAMPLING_RATE;do
+ for up_threshold in $UP_THRESHOLD;do
+ command="${command} /var/log/cpufreq-bench/up_threshold_${up_threshold}_sampling_rate_${sampling_rate}/* \"up_threshold = $up_threshold - sampling_rate = $sampling_rate\""
+ done
+ done
+ echo "$command"
+ eval "$command"
+}
+
+measure
+create_plots \ No newline at end of file
diff --git a/tools/power/cpupower/bench/example.cfg b/tools/power/cpupower/bench/example.cfg
new file mode 100644
index 00000000000..f91f6436068
--- /dev/null
+++ b/tools/power/cpupower/bench/example.cfg
@@ -0,0 +1,11 @@
+sleep = 50000
+load = 50000
+cpu = 0
+priority = LOW
+output = /var/log/cpufreq-bench
+sleep_step = 50000
+load_step = 50000
+cycles = 20
+rounds = 40
+verbose = 0
+governor = ondemand
diff --git a/tools/power/cpupower/bench/main.c b/tools/power/cpupower/bench/main.c
new file mode 100644
index 00000000000..24910313a52
--- /dev/null
+++ b/tools/power/cpupower/bench/main.c
@@ -0,0 +1,202 @@
+/* cpufreq-bench CPUFreq microbenchmark
+ *
+ * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <errno.h>
+
+#include "config.h"
+#include "system.h"
+#include "benchmark.h"
+
+static struct option long_options[] = {
+ {"output", 1, 0, 'o'},
+ {"sleep", 1, 0, 's'},
+ {"load", 1, 0, 'l'},
+ {"verbose", 0, 0, 'v'},
+ {"cpu", 1, 0, 'c'},
+ {"governor", 1, 0, 'g'},
+ {"prio", 1, 0, 'p'},
+ {"file", 1, 0, 'f'},
+ {"cycles", 1, 0, 'n'},
+ {"rounds", 1, 0, 'r'},
+ {"load-step", 1, 0, 'x'},
+ {"sleep-step", 1, 0, 'y'},
+ {"help", 0, 0, 'h'},
+ {0, 0, 0, 0}
+};
+
+/*******************************************************************
+ usage
+*******************************************************************/
+
+void usage()
+{
+ printf("usage: ./bench\n");
+ printf("Options:\n");
+ printf(" -l, --load=<long int>\t\tinitial load time in us\n");
+ printf(" -s, --sleep=<long int>\t\tinitial sleep time in us\n");
+ printf(" -x, --load-step=<long int>\ttime to be added to load time, in us\n");
+ printf(" -y, --sleep-step=<long int>\ttime to be added to sleep time, in us\n");
+ printf(" -c, --cpu=<cpu #>\t\t\tCPU Nr. to use, starting at 0\n");
+ printf(" -p, --prio=<priority>\t\t\tscheduler priority, HIGH, LOW or DEFAULT\n");
+ printf(" -g, --governor=<governor>\t\tcpufreq governor to test\n");
+ printf(" -n, --cycles=<int>\t\t\tload/sleep cycles\n");
+ printf(" -r, --rounds<int>\t\t\tload/sleep rounds\n");
+ printf(" -f, --file=<configfile>\t\tconfig file to use\n");
+ printf(" -o, --output=<dir>\t\t\toutput path. Filename will be OUTPUTPATH/benchmark_TIMESTAMP.log\n");
+ printf(" -v, --verbose\t\t\t\tverbose output on/off\n");
+ printf(" -h, --help\t\t\t\tPrint this help screen\n");
+ exit(1);
+}
+
+/*******************************************************************
+ main
+*******************************************************************/
+
+int main(int argc, char **argv)
+{
+ int c;
+ int option_index = 0;
+ struct config *config = NULL;
+
+ config = prepare_default_config();
+
+ if (config == NULL)
+ return EXIT_FAILURE;
+
+ while (1) {
+ c = getopt_long (argc, argv, "hg:o:s:l:vc:p:f:n:r:x:y:",
+ long_options, &option_index);
+ if (c == -1)
+ break;
+
+ switch (c) {
+ case 'o':
+ if (config->output != NULL)
+ fclose(config->output);
+
+ config->output = prepare_output(optarg);
+
+ if (config->output == NULL)
+ return EXIT_FAILURE;
+
+ dprintf("user output path -> %s\n", optarg);
+ break;
+ case 's':
+ sscanf(optarg, "%li", &config->sleep);
+ dprintf("user sleep time -> %s\n", optarg);
+ break;
+ case 'l':
+ sscanf(optarg, "%li", &config->load);
+ dprintf("user load time -> %s\n", optarg);
+ break;
+ case 'c':
+ sscanf(optarg, "%u", &config->cpu);
+ dprintf("user cpu -> %s\n", optarg);
+ break;
+ case 'g':
+ strncpy(config->governor, optarg, 14);
+ dprintf("user governor -> %s\n", optarg);
+ break;
+ case 'p':
+ if (string_to_prio(optarg) != SCHED_ERR) {
+ config->prio = string_to_prio(optarg);
+ dprintf("user prio -> %s\n", optarg);
+ } else {
+ if (config != NULL) {
+ if (config->output != NULL)
+ fclose(config->output);
+ free(config);
+ }
+ usage();
+ }
+ break;
+ case 'n':
+ sscanf(optarg, "%u", &config->cycles);
+ dprintf("user cycles -> %s\n", optarg);
+ break;
+ case 'r':
+ sscanf(optarg, "%u", &config->rounds);
+ dprintf("user rounds -> %s\n", optarg);
+ break;
+ case 'x':
+ sscanf(optarg, "%li", &config->load_step);
+ dprintf("user load_step -> %s\n", optarg);
+ break;
+ case 'y':
+ sscanf(optarg, "%li", &config->sleep_step);
+ dprintf("user sleep_step -> %s\n", optarg);
+ break;
+ case 'f':
+ if (prepare_config(optarg, config))
+ return EXIT_FAILURE;
+ break;
+ case 'v':
+ config->verbose = 1;
+ dprintf("verbose output enabled\n");
+ break;
+ case 'h':
+ case '?':
+ default:
+ if (config != NULL) {
+ if (config->output != NULL)
+ fclose(config->output);
+ free(config);
+ }
+ usage();
+ }
+ }
+
+ if (config->verbose) {
+ printf("starting benchmark with parameters:\n");
+ printf("config:\n\t"
+ "sleep=%li\n\t"
+ "load=%li\n\t"
+ "sleep_step=%li\n\t"
+ "load_step=%li\n\t"
+ "cpu=%u\n\t"
+ "cycles=%u\n\t"
+ "rounds=%u\n\t"
+ "governor=%s\n\n",
+ config->sleep,
+ config->load,
+ config->sleep_step,
+ config->load_step,
+ config->cpu,
+ config->cycles,
+ config->rounds,
+ config->governor);
+ }
+
+ prepare_user(config);
+ prepare_system(config);
+ start_benchmark(config);
+
+ if (config->output != stdout)
+ fclose(config->output);
+
+ free(config);
+
+ return EXIT_SUCCESS;
+}
+
diff --git a/tools/power/cpupower/bench/parse.c b/tools/power/cpupower/bench/parse.c
new file mode 100644
index 00000000000..543bba14ae2
--- /dev/null
+++ b/tools/power/cpupower/bench/parse.c
@@ -0,0 +1,225 @@
+/* cpufreq-bench CPUFreq microbenchmark
+ *
+ * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <time.h>
+#include <dirent.h>
+
+#include <sys/utsname.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "parse.h"
+#include "config.h"
+
+/**
+ * converts priority string to priority
+ *
+ * @param str string that represents a scheduler priority
+ *
+ * @retval priority
+ * @retval SCHED_ERR when the priority doesn't exit
+ **/
+
+enum sched_prio string_to_prio(const char *str)
+{
+ if (strncasecmp("high", str, strlen(str)) == 0)
+ return SCHED_HIGH;
+ else if (strncasecmp("default", str, strlen(str)) == 0)
+ return SCHED_DEFAULT;
+ else if (strncasecmp("low", str, strlen(str)) == 0)
+ return SCHED_LOW;
+ else
+ return SCHED_ERR;
+}
+
+/**
+ * create and open logfile
+ *
+ * @param dir directory in which the logfile should be created
+ *
+ * @retval logfile on success
+ * @retval NULL when the file can't be created
+ **/
+
+FILE *prepare_output(const char *dirname)
+{
+ FILE *output = NULL;
+ int len;
+ char *filename;
+ struct utsname sysdata;
+ DIR *dir;
+
+ dir = opendir(dirname);
+ if (dir == NULL) {
+ if (mkdir(dirname, 0755)) {
+ perror("mkdir");
+ fprintf(stderr, "error: Cannot create dir %s\n",
+ dirname);
+ return NULL;
+ }
+ }
+
+ len = strlen(dirname) + 30;
+ filename = malloc(sizeof(char) * len);
+
+ if (uname(&sysdata) == 0) {
+ len += strlen(sysdata.nodename) + strlen(sysdata.release);
+ filename = realloc(filename, sizeof(char) * len);
+
+ if (filename == NULL) {
+ perror("realloc");
+ return NULL;
+ }
+
+ snprintf(filename, len - 1, "%s/benchmark_%s_%s_%li.log",
+ dirname, sysdata.nodename, sysdata.release, time(NULL));
+ } else {
+ snprintf(filename, len - 1, "%s/benchmark_%li.log",
+ dirname, time(NULL));
+ }
+
+ dprintf("logilename: %s\n", filename);
+
+ output = fopen(filename, "w+");
+ if (output == NULL) {
+ perror("fopen");
+ fprintf(stderr, "error: unable to open logfile\n");
+ free(filename);
+ /* avoid writing the header below to a NULL stream */
+ return NULL;
+ }
+
+ fprintf(stdout, "Logfile: %s\n", filename);
+
+ free(filename);
+ fprintf(output, "#round load sleep performance powersave percentage\n");
+ return output;
+}
+
+/**
+ * returns the default config
+ *
+ * @retval default config on success
+ * @retval NULL when the output file can't be created
+ **/
+
+struct config *prepare_default_config()
+{
+ struct config *config = malloc(sizeof(struct config));
+
+ dprintf("loading defaults\n");
+
+ config->sleep = 500000;
+ config->load = 500000;
+ config->sleep_step = 500000;
+ config->load_step = 500000;
+ config->cycles = 5;
+ config->rounds = 50;
+ config->cpu = 0;
+ config->prio = SCHED_HIGH;
+ config->verbose = 0;
+ strncpy(config->governor, "ondemand", sizeof(config->governor));
+
+ config->output = stdout;
+
+#ifdef DEFAULT_CONFIG_FILE
+ if (prepare_config(DEFAULT_CONFIG_FILE, config))
+ return NULL;
+#endif
+ return config;
+}
+
+/**
+ * parses config file and returns the config to the caller
+ *
+ * @param path config file name
+ *
+ * @retval 1 on error
+ * @retval 0 on success
+ **/
+
+int prepare_config(const char *path, struct config *config)
+{
+ size_t len = 0;
+ char *opt, *val, *line = NULL;
+ FILE *configfile = fopen(path, "r");
+
+ if (config == NULL) {
+ fprintf(stderr, "error: config is NULL\n");
+ return 1;
+ }
+
+ if (configfile == NULL) {
+ perror("fopen");
+ fprintf(stderr, "error: unable to read configfile\n");
+ free(config);
+ return 1;
+ }
+
+ while (getline(&line, &len, configfile) != -1) {
+ if (line[0] == '#' || line[0] == ' ')
+ continue;
+
+ sscanf(line, "%as = %as", &opt, &val);
+
+ dprintf("parsing: %s -> %s\n", opt, val);
+
+ if (strncmp("sleep", opt, strlen(opt)) == 0)
+ sscanf(val, "%li", &config->sleep);
+
+ else if (strncmp("load", opt, strlen(opt)) == 0)
+ sscanf(val, "%li", &config->load);
+
+ else if (strncmp("load_step", opt, strlen(opt)) == 0)
+ sscanf(val, "%li", &config->load_step);
+
+ else if (strncmp("sleep_step", opt, strlen(opt)) == 0)
+ sscanf(val, "%li", &config->sleep_step);
+
+ else if (strncmp("cycles", opt, strlen(opt)) == 0)
+ sscanf(val, "%u", &config->cycles);
+
+ else if (strncmp("rounds", opt, strlen(opt)) == 0)
+ sscanf(val, "%u", &config->rounds);
+
+ else if (strncmp("verbose", opt, strlen(opt)) == 0)
+ sscanf(val, "%u", &config->verbose);
+
+ else if (strncmp("output", opt, strlen(opt)) == 0)
+ config->output = prepare_output(val);
+
+ else if (strncmp("cpu", opt, strlen(opt)) == 0)
+ sscanf(val, "%u", &config->cpu);
+
+ else if (strncmp("governor", opt, 14) == 0)
+ strncpy(config->governor, val, 14);
+
+ else if (strncmp("priority", opt, strlen(opt)) == 0) {
+ if (string_to_prio(val) != SCHED_ERR)
+ config->prio = string_to_prio(val);
+ }
+ }
+
+ free(line);
+ free(opt);
+ free(val);
+
+ return 0;
+}
diff --git a/tools/power/cpupower/bench/parse.h b/tools/power/cpupower/bench/parse.h
new file mode 100644
index 00000000000..a8dc632d9ee
--- /dev/null
+++ b/tools/power/cpupower/bench/parse.h
@@ -0,0 +1,53 @@
+/* cpufreq-bench CPUFreq microbenchmark
+ *
+ * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* struct that holds the required config parameters */
+struct config
+{
+ long sleep; /* sleep time in µs */
+ long load; /* load time in µs */
+ long sleep_step; /* time value which changes the
+ * sleep time after every round in µs */
+ long load_step; /* time value which changes the
+ * load time after every round in µs */
+ unsigned int cycles; /* calculation cycles with the same sleep/load time */
+ unsigned int rounds; /* calculation rounds with iterated sleep/load time */
+ unsigned int cpu; /* cpu for which the affinity is set */
+ char governor[15]; /* cpufreq governor */
+ enum sched_prio /* possible scheduler priorities */
+ {
+ SCHED_ERR = -1,
+ SCHED_HIGH,
+ SCHED_DEFAULT,
+ SCHED_LOW
+ } prio;
+
+ unsigned int verbose; /* verbose output */
+ FILE *output; /* logfile */
+ char *output_filename; /* logfile name, must be freed at the end
+ if output != NULL and output != stdout*/
+};
+
+enum sched_prio string_to_prio(const char *str);
+
+FILE *prepare_output(const char *dir);
+
+int prepare_config(const char *path, struct config *config);
+struct config *prepare_default_config();
+
diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c
new file mode 100644
index 00000000000..f01e3f4be84
--- /dev/null
+++ b/tools/power/cpupower/bench/system.c
@@ -0,0 +1,191 @@
+/* cpufreq-bench CPUFreq microbenchmark
+ *
+ * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdio.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <sched.h>
+
+#include <cpufreq.h>
+
+#include "config.h"
+#include "system.h"
+
+/**
+ * returns time since epoch in µs
+ *
+ * @retval time
+ **/
+
+long long int get_time()
+{
+ struct timeval now;
+
+ gettimeofday(&now, NULL);
+
+ return (long long int)(now.tv_sec * 1000000LL + now.tv_usec);
+}
+
+/**
+ * sets the cpufreq governor
+ *
+ * @param governor cpufreq governor name
+ * @param cpu cpu for which the governor should be set
+ *
+ * @retval 0 on success
+ * @retval -1 when failed
+ **/
+
+int set_cpufreq_governor(char *governor, unsigned int cpu)
+{
+
+ dprintf("set %s as cpufreq governor\n", governor);
+
+ if (cpufreq_cpu_exists(cpu) != 0) {
+ perror("cpufreq_cpu_exists");
+ fprintf(stderr, "error: cpu %u does not exist\n", cpu);
+ return -1;
+ }
+
+ if (cpufreq_modify_policy_governor(cpu, governor) != 0) {
+ perror("cpufreq_modify_policy_governor");
+ fprintf(stderr, "error: unable to set %s governor\n", governor);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * sets cpu affinity for the process
+ *
+ * @param cpu cpu# to which the affinity should be set
+ *
+ * @retval 0 on success
+ * @retval -1 when setting the affinity failed
+ **/
+
+int set_cpu_affinity(unsigned int cpu)
+{
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+
+ dprintf("set affinity to cpu #%u\n", cpu);
+
+ if (sched_setaffinity(getpid(), sizeof(cpu_set_t), &cpuset) < 0) {
+ perror("sched_setaffinity");
+ fprintf(stderr, "warning: unable to set cpu affinity\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * sets the process priority parameter
+ *
+ * @param priority priority value
+ *
+ * @retval 0 on success
+ * @retval -1 when setting the priority failed
+ **/
+
+int set_process_priority(int priority)
+{
+ struct sched_param param;
+
+ dprintf("set scheduler priority to %i\n", priority);
+
+ param.sched_priority = priority;
+
+ if (sched_setscheduler(0, SCHEDULER, &param) < 0) {
+ perror("sched_setscheduler");
+ fprintf(stderr, "warning: unable to set scheduler priority\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * notifies the user that the benchmark may run some time
+ *
+ * @param config benchmark config values
+ *
+ **/
+
+void prepare_user(const struct config *config)
+{
+ unsigned long sleep_time = 0;
+ unsigned long load_time = 0;
+ unsigned int round;
+
+ for (round = 0; round < config->rounds; round++) {
+ sleep_time += 2 * config->cycles *
+ (config->sleep + config->sleep_step * round);
+ load_time += 2 * config->cycles *
+ (config->load + config->load_step * round) +
+ (config->load + config->load_step * round * 4);
+ }
+
+ if (config->verbose || config->output != stdout)
+ printf("approx. test duration: %im\n",
+ (int)((sleep_time + load_time) / 60000000));
+}
+
+/**
+ * sets up the cpu affinity and scheduler priority
+ *
+ * @param config benchmark config values
+ *
+ **/
+
+void prepare_system(const struct config *config)
+{
+ if (config->verbose)
+ printf("set cpu affinity to cpu #%u\n", config->cpu);
+
+ set_cpu_affinity(config->cpu);
+
+ switch (config->prio) {
+ case SCHED_HIGH:
+ if (config->verbose)
+ printf("high priority condition requested\n");
+
+ set_process_priority(PRIORITY_HIGH);
+ break;
+ case SCHED_LOW:
+ if (config->verbose)
+ printf("low priority condition requested\n");
+
+ set_process_priority(PRIORITY_LOW);
+ break;
+ default:
+ if (config->verbose)
+ printf("default priority condition requested\n");
+
+ set_process_priority(PRIORITY_DEFAULT);
+ }
+}
+
diff --git a/tools/power/cpupower/bench/system.h b/tools/power/cpupower/bench/system.h
new file mode 100644
index 00000000000..3a8c858b78f
--- /dev/null
+++ b/tools/power/cpupower/bench/system.h
@@ -0,0 +1,29 @@
+/* cpufreq-bench CPUFreq microbenchmark
+ *
+ * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "parse.h"
+
+long long get_time();
+
+int set_cpufreq_governor(char *governor, unsigned int cpu);
+int set_cpu_affinity(unsigned int cpu);
+int set_process_priority(int priority);
+
+void prepare_user(const struct config *config);
+void prepare_system(const struct config *config);
diff --git a/tools/power/cpupower/debug/i386/Makefile b/tools/power/cpupower/debug/i386/Makefile
new file mode 100644
index 00000000000..d08cc1ead9b
--- /dev/null
+++ b/tools/power/cpupower/debug/i386/Makefile
@@ -0,0 +1,20 @@
+default: all
+
+centrino-decode: centrino-decode.c
+ $(CC) $(CFLAGS) -o centrino-decode centrino-decode.c
+
+dump_psb: dump_psb.c
+ $(CC) $(CFLAGS) -o dump_psb dump_psb.c
+
+intel_gsic: intel_gsic.c
+ $(CC) $(CFLAGS) -o intel_gsic -llrmi intel_gsic.c
+
+powernow-k8-decode: powernow-k8-decode.c
+ $(CC) $(CFLAGS) -o powernow-k8-decode powernow-k8-decode.c
+
+all: centrino-decode dump_psb intel_gsic powernow-k8-decode
+
+clean:
+ rm -rf centrino-decode dump_psb intel_gsic powernow-k8-decode
+
+.PHONY: all default clean
diff --git a/tools/power/cpupower/debug/i386/centrino-decode.c b/tools/power/cpupower/debug/i386/centrino-decode.c
new file mode 100644
index 00000000000..7ef24cce492
--- /dev/null
+++ b/tools/power/cpupower/debug/i386/centrino-decode.c
@@ -0,0 +1,113 @@
+/*
+ * (C) 2003 - 2004 Dominik Brodowski <linux@dominikbrodowski.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Based on code found in
+ * linux/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+ * and originally developed by Jeremy Fitzhardinge.
+ *
+ * USAGE: simply run it to decode the current settings on CPU 0,
+ * or pass the CPU number as argument, or pass the MSR content
+ * as argument.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#define MCPU 32
+
+#define MSR_IA32_PERF_STATUS 0x198
+
+static int rdmsr(unsigned int cpu, unsigned int msr,
+ unsigned int *lo, unsigned int *hi)
+{
+ int fd;
+ char file[20];
+ unsigned long long val;
+ int retval = -1;
+
+ *lo = *hi = 0;
+
+ if (cpu > MCPU)
+ goto err1;
+
+ sprintf(file, "/dev/cpu/%d/msr", cpu);
+ fd = open(file, O_RDONLY);
+
+ if (fd < 0)
+ goto err1;
+
+ if (lseek(fd, msr, SEEK_CUR) == -1)
+ goto err2;
+
+ if (read(fd, &val, 8) != 8)
+ goto err2;
+
+ *lo = (uint32_t )(val & 0xffffffffull);
+ *hi = (uint32_t )(val>>32 & 0xffffffffull);
+
+ retval = 0;
+err2:
+ close(fd);
+err1:
+ return retval;
+}
+
+static void decode (unsigned int msr)
+{
+ unsigned int multiplier;
+ unsigned int mv;
+
+ multiplier = ((msr >> 8) & 0xFF);
+
+ mv = (((msr & 0xFF) * 16) + 700);
+
+ printf("0x%x means multiplier %d @ %d mV\n", msr, multiplier, mv);
+}
+
+static int decode_live(unsigned int cpu)
+{
+ unsigned int lo, hi;
+ int err;
+
+ err = rdmsr(cpu, MSR_IA32_PERF_STATUS, &lo, &hi);
+
+ if (err) {
+ printf("can't get MSR_IA32_PERF_STATUS for cpu %d\n", cpu);
+ printf("Possible trouble: you don't run an Enhanced SpeedStep capable cpu\n");
+ printf("or you are not root, or the msr driver is not present\n");
+ return 1;
+ }
+
+ decode(lo);
+
+ return 0;
+}
+
+int main (int argc, char **argv)
+{
+ unsigned int cpu, mode = 0;
+
+ if (argc < 2)
+ cpu = 0;
+ else {
+ cpu = strtoul(argv[1], NULL, 0);
+ if (cpu >= MCPU)
+ mode = 1;
+ }
+
+ if (mode)
+ decode(cpu);
+ else
+ decode_live(cpu);
+
+ return 0;
+}
diff --git a/tools/power/cpupower/debug/i386/dump_psb.c b/tools/power/cpupower/debug/i386/dump_psb.c
new file mode 100644
index 00000000000..8d6a4751425
--- /dev/null
+++ b/tools/power/cpupower/debug/i386/dump_psb.c
@@ -0,0 +1,196 @@
+/*
+ * dump_psb. (c) 2004, Dave Jones, Red Hat Inc.
+ * Licensed under the GPL v2.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#define _GNU_SOURCE
+#include <getopt.h>
+
+#include <sys/mman.h>
+
+#define LEN (0x100000 - 0xc0000)
+#define OFFSET (0xc0000)
+
+#ifndef __packed
+#define __packed __attribute((packed))
+#endif
+
+static long relevant;
+
+static const int fid_to_mult[32] = {
+ 110, 115, 120, 125, 50, 55, 60, 65,
+ 70, 75, 80, 85, 90, 95, 100, 105,
+ 30, 190, 40, 200, 130, 135, 140, 210,
+ 150, 225, 160, 165, 170, 180, -1, -1,
+};
+
+static const int vid_to_voltage[32] = {
+ 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
+ 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0,
+ 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
+ 1075, 1050, 1024, 1000, 975, 950, 925, 0,
+};
+
+struct psb_header {
+ char signature[10];
+ u_char version;
+ u_char flags;
+ u_short settlingtime;
+ u_char res1;
+ u_char numpst;
+} __packed;
+
+struct pst_header {
+ u_int32_t cpuid;
+ u_char fsb;
+ u_char maxfid;
+ u_char startvid;
+ u_char numpstates;
+} __packed;
+
+static u_int fsb;
+static u_int sgtc;
+
+static int
+decode_pst(char *p, int npstates)
+{
+ int i;
+ int freq, fid, vid;
+
+ for (i = 0; i < npstates; ++i) {
+ fid = *p++;
+ vid = *p++;
+ freq = 100 * fid_to_mult[fid] * fsb;
+
+ printf(" %2d %8dkHz FID %02x (%2d.%01d) VID %02x (%4dmV)\n",
+ i,
+ freq,
+ fid, fid_to_mult[fid]/10, fid_to_mult[fid]%10,
+ vid, vid_to_voltage[vid]);
+ }
+
+ return 0;
+}
+
+static
+void decode_psb(char *p, int numpst)
+{
+ int i;
+ struct psb_header *psb;
+ struct pst_header *pst;
+
+ psb = (struct psb_header*) p;
+
+ if (psb->version != 0x12)
+ return;
+
+ printf("PSB version: %hhx flags: %hhx settling time %hhuus res1 %hhx num pst %hhu\n",
+ psb->version,
+ psb->flags,
+ psb->settlingtime,
+ psb->res1,
+ psb->numpst);
+ sgtc = psb->settlingtime * 100;
+
+ if (sgtc < 10000)
+ sgtc = 10000;
+
+ p = ((char *) psb) + sizeof(struct psb_header);
+
+ if (numpst < 0)
+ numpst = psb->numpst;
+ else
+ printf("Overriding number of pst :%d\n", numpst);
+
+ for (i = 0; i < numpst; i++) {
+ pst = (struct pst_header*) p;
+
+ if (relevant != 0) {
+ if (relevant!= pst->cpuid)
+ goto next_one;
+ }
+
+ printf(" PST %d cpuid %.3x fsb %hhu mfid %hhx svid %hhx numberstates %hhu\n",
+ i+1,
+ pst->cpuid,
+ pst->fsb,
+ pst->maxfid,
+ pst->startvid,
+ pst->numpstates);
+
+ fsb = pst->fsb;
+ decode_pst(p + sizeof(struct pst_header), pst->numpstates);
+
+next_one:
+ p += sizeof(struct pst_header) + 2*pst->numpstates;
+ }
+
+}
+
+static struct option info_opts[] = {
+ {.name = "numpst", .has_arg=no_argument, .flag=NULL, .val='n'},
+};
+
+void print_help(void)
+{
+ printf ("Usage: dump_psb [options]\n");
+ printf ("Options:\n");
+ printf (" -n, --numpst Set number of PST tables to scan\n");
+ printf (" -r, --relevant Only display PSTs relevant to cpuid N\n");
+}
+
+int
+main(int argc, char *argv[])
+{
+ int fd;
+ int numpst=-1;
+ int ret=0, cont=1;
+ char *mem = NULL;
+ char *p;
+
+ do {
+ ret = getopt_long(argc, argv, "hr:n:", info_opts, NULL);
+ switch (ret){
+ case '?':
+ case 'h':
+ print_help();
+ cont = 0;
+ break;
+ case 'r':
+ relevant = strtol(optarg, NULL, 16);
+ break;
+ case 'n':
+ numpst = strtol(optarg, NULL, 10);
+ break;
+ case -1:
+ cont = 0;
+ break;
+ }
+
+ } while(cont);
+
+ fd = open("/dev/mem", O_RDONLY);
+ if (fd < 0) {
+ printf ("Couldn't open /dev/mem. Are you root?\n");
+ exit(1);
+ }
+
+ mem = mmap(mem, 0x100000 - 0xc0000, PROT_READ, MAP_SHARED, fd, 0xc0000);
+ close(fd);
+
+ for (p = mem; p - mem < LEN; p+=16) {
+ if (memcmp(p, "AMDK7PNOW!", 10) == 0) {
+ decode_psb(p, numpst);
+ break;
+ }
+ }
+
+ munmap(mem, LEN);
+ return 0;
+}
diff --git a/tools/power/cpupower/debug/i386/intel_gsic.c b/tools/power/cpupower/debug/i386/intel_gsic.c
new file mode 100644
index 00000000000..53f5293c9c9
--- /dev/null
+++ b/tools/power/cpupower/debug/i386/intel_gsic.c
@@ -0,0 +1,78 @@
+/*
+ * (C) 2003 Bruno Ducrot
+ * (C) 2004 Dominik Brodowski <linux@dominikbrodowski.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Based on code found in
+ * linux/include/asm-i386/ist.h and linux/arch/i386/kernel/setup.c
+ * and originally developed by Andy Grover <andrew.grover@intel.com>
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <lrmi.h>
+
+int main (void)
+{
+ struct LRMI_regs r;
+ int retval;
+
+ if (!LRMI_init())
+ return 0;
+
+ memset(&r, 0, sizeof(r));
+
+ r.eax = 0x0000E980;
+ r.edx = 0x47534943;
+
+ retval = LRMI_int(0x15, &r);
+
+ if (!retval) {
+ printf("Failed!\n");
+ return 0;
+ }
+ if (r.eax == 0x47534943) {
+ printf("BIOS supports GSIC call:\n");
+ printf("\tsignature: %c%c%c%c\n",
+ (r.eax >> 24) & 0xff,
+ (r.eax >> 16) & 0xff,
+ (r.eax >> 8) & 0xff,
+ (r.eax) & 0xff);
+ printf("\tcommand port = 0x%.4x\n",
+ r.ebx & 0xffff);
+ printf("\tcommand = 0x%.4x\n",
+ (r.ebx >> 16) & 0xffff);
+ printf("\tevent port = 0x%.8x\n", r.ecx);
+ printf("\tflags = 0x%.8x\n", r.edx);
+ if (((r.ebx >> 16) & 0xffff) != 0x82) {
+ printf("non-default command value. If speedstep-smi "
+ "doesn't work out of the box,\nyou may want to "
+ "try out the default value by passing "
+ "smi_cmd=0x82 to the module\n ON YOUR OWN "
+ "RISK.\n");
+ }
+ if ((r.ebx & 0xffff) != 0xb2) {
+ printf("non-default command port. If speedstep-smi "
+ "doesn't work out of the box,\nyou may want to "
+ "try out the default value by passing "
+ "smi_port=0x82 to the module\n ON YOUR OWN "
+ "RISK.\n");
+ }
+ } else {
+ printf("BIOS DOES NOT support GSIC call. Dumping registers anyway:\n");
+ printf("eax = 0x%.8x\n", r.eax);
+ printf("ebx = 0x%.8x\n", r.ebx);
+ printf("ecx = 0x%.8x\n", r.ecx);
+ printf("edx = 0x%.8x\n", r.edx);
+ printf("Note also that some BIOS do not support the initial "
+ "GSIC call, but the newer\nspeeedstep-smi driver may "
+ "work.\nFor this, you need to pass some arguments to "
+ "the speedstep-smi driver:\n");
+ printf("\tsmi_cmd=0x?? smi_port=0x?? smi_sig=1\n");
+ printf("\nUnfortunately, you have to know what exactly are "
+ "smi_cmd and smi_port, and this\nis system "
+ "dependant.\n");
+ }
+ return 1;
+}
diff --git a/tools/power/cpupower/debug/i386/powernow-k8-decode.c b/tools/power/cpupower/debug/i386/powernow-k8-decode.c
new file mode 100644
index 00000000000..638a6b3bfd9
--- /dev/null
+++ b/tools/power/cpupower/debug/i386/powernow-k8-decode.c
@@ -0,0 +1,96 @@
+/*
+ * (C) 2004 Bruno Ducrot <ducrot@poupinou.org>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Based on code found in
+ * linux/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+ * and originally developed by Paul Devriendt
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#define MCPU 32
+
+#define MSR_FIDVID_STATUS 0xc0010042
+
+#define MSR_S_HI_CURRENT_VID 0x0000001f
+#define MSR_S_LO_CURRENT_FID 0x0000003f
+
+static int get_fidvid(uint32_t cpu, uint32_t *fid, uint32_t *vid)
+{
+ int err = 1;
+ uint64_t msr = 0;
+ int fd;
+ char file[20];
+
+ if (cpu > MCPU)
+ goto out;
+
+ sprintf(file, "/dev/cpu/%d/msr", cpu);
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0)
+ goto out;
+ lseek(fd, MSR_FIDVID_STATUS, SEEK_CUR);
+ if (read(fd, &msr, 8) != 8)
+ goto err1;
+
+ *fid = ((uint32_t )(msr & 0xffffffffull)) & MSR_S_LO_CURRENT_FID;
+ *vid = ((uint32_t )(msr>>32 & 0xffffffffull)) & MSR_S_HI_CURRENT_VID;
+ err = 0;
+err1:
+ close(fd);
+out:
+ return err;
+}
+
+
+/* Return a frequency in MHz, given an input fid */
+static uint32_t find_freq_from_fid(uint32_t fid)
+{
+ return 800 + (fid * 100);
+}
+
+/* Return a voltage in millivolts, given an input vid */
+static uint32_t find_millivolts_from_vid(uint32_t vid)
+{
+ return 1550-vid*25;
+}
+
+int main (int argc, char *argv[])
+{
+ int err;
+ int cpu;
+ uint32_t fid, vid;
+
+ if (argc < 2)
+ cpu = 0;
+ else
+ cpu = strtoul(argv[1], NULL, 0);
+
+ err = get_fidvid(cpu, &fid, &vid);
+
+ if (err) {
+ printf("can't get fid, vid from MSR\n");
+ printf("Possible trouble: you don't run a powernow-k8 capable cpu\n");
+ printf("or you are not root, or the msr driver is not present\n");
+ exit(1);
+ }
+
+
+ printf("cpu %d currently at %d MHz and %d mV\n",
+ cpu,
+ find_freq_from_fid(fid),
+ find_millivolts_from_vid(vid));
+
+ return 0;
+}
diff --git a/tools/power/cpupower/debug/kernel/Makefile b/tools/power/cpupower/debug/kernel/Makefile
new file mode 100644
index 00000000000..96b146fe6f8
--- /dev/null
+++ b/tools/power/cpupower/debug/kernel/Makefile
@@ -0,0 +1,23 @@
+obj-m :=
+
+KDIR := /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+KMISC := /lib/modules/$(shell uname -r)/cpufrequtils/
+
+ifeq ("$(CONFIG_X86_TSC)", "y")
+ obj-m += cpufreq-test_tsc.o
+endif
+
+default:
+ $(MAKE) -C $(KDIR) M=$(PWD)
+
+clean:
+ - rm -rf *.o *.ko .tmp-versions .*.cmd .*.mod.* *.mod.c
+ - rm -rf .tmp_versions* Module.symvers modules.order
+
+install: default
+ install -d $(KMISC)
+ install -m 644 -c *.ko $(KMISC)
+ /sbin/depmod -a
+
+all: default
diff --git a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
new file mode 100644
index 00000000000..66cace601e5
--- /dev/null
+++ b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
@@ -0,0 +1,113 @@
+/*
+ * test module to check whether the TSC-based delay routine continues
+ * to work properly after cpufreq transitions. Needs ACPI to work
+ * properly.
+ *
+ * Based partly on the Power Management Timer (PMTMR) code to be found
+ * in arch/i386/kernel/timers/timer_pm.c on recent 2.6. kernels, especially
+ * code written by John Stultz. The read_pmtmr function was copied verbatim
+ * from that file.
+ *
+ * (C) 2004 Dominik Brodowski
+ *
+ * To use:
+ * 1.) pass clock=tsc to the kernel on your bootloader
+ * 2.) modprobe this module (it'll fail)
+ * 3.) change CPU frequency
+ * 4.) modprobe this module again
+ * 5.) if the third value, "diff_pmtmr", changes between 2. and 4., the
+ * TSC-based delay routine on the Linux kernel does not correctly
+ * handle the cpufreq transition. Please report this to
+ * cpufreq@vger.kernel.org
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+static int pm_tmr_ioport = 0;
+
+/* helper function to safely read the ACPI PM time source */
+static u32 read_pmtmr(void)
+{
+ u32 v1=0,v2=0,v3=0;
+ /* It has been reported that on various broken chipsets
+ * (ICH4, PIIX4 and PIIX4E) the ACPI PM time source is not
+ * latched, so you must read it multiple times to ensure a
+ * safe value is read.
+ */
+ do {
+ v1 = inl(pm_tmr_ioport);
+ v2 = inl(pm_tmr_ioport);
+ v3 = inl(pm_tmr_ioport);
+ } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
+ || (v3 > v1 && v3 < v2));
+
+ /* mask the output to 24 bits */
+ return (v2 & 0xFFFFFF);
+}
+
+static int __init cpufreq_test_tsc(void)
+{
+ u32 now, then, diff;
+ u64 now_tsc, then_tsc, diff_tsc;
+ int i;
+
+ /* the following code snippet is copied from arch/x86/kernel/acpi/boot.c
+ of Linux v2.6.25. */
+
+ /* detect the location of the ACPI PM Timer */
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
+ /* FADT rev. 2 */
+ if (acpi_gbl_FADT.xpm_timer_block.space_id !=
+ ACPI_ADR_SPACE_SYSTEM_IO)
+ return 0;
+
+ pm_tmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
+ /*
+ * "X" fields are optional extensions to the original V1.0
+ * fields, so we must selectively expand V1.0 fields if the
+ * corresponding X field is zero.
+ */
+ if (!pm_tmr_ioport)
+ pm_tmr_ioport = acpi_gbl_FADT.pm_timer_block;
+ } else {
+ /* FADT rev. 1 */
+ pm_tmr_ioport = acpi_gbl_FADT.pm_timer_block;
+ }
+
+ printk(KERN_DEBUG "start--> \n");
+ then = read_pmtmr();
+ rdtscll(then_tsc);
+ for (i=0;i<20;i++) {
+ mdelay(100);
+ now = read_pmtmr();
+ rdtscll(now_tsc);
+ diff = (now - then) & 0xFFFFFF;
+ diff_tsc = now_tsc - then_tsc;
+ printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);
+ then = now;
+ then_tsc = now_tsc;
+ }
+ printk(KERN_DEBUG "<-- end \n");
+ return -ENODEV;
+}
+
+static void __exit cpufreq_none(void)
+{
+ return;
+}
+
+module_init(cpufreq_test_tsc)
+module_exit(cpufreq_none)
+
+
+MODULE_AUTHOR("Dominik Brodowski");
+MODULE_DESCRIPTION("Verify the TSC cpufreq notifier working correctly -- needs ACPI-enabled system");
+MODULE_LICENSE ("GPL");
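
The per-iteration delta above is masked to 24 bits, so wrap-around of the ACPI PM timer between two samples is handled transparently. A minimal userspace sketch of that arithmetic (illustrative names, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Mirror of diff = (now - then) & 0xFFFFFF: the PM timer counts modulo 2^24. */
static uint32_t pmtmr_delta(uint32_t then, uint32_t now)
{
	return (now - then) & 0xFFFFFF;
}

int main(void)
{
	assert(pmtmr_delta(0x000100, 0x000200) == 0x000100);	/* no wrap */
	assert(pmtmr_delta(0xFFFF00, 0x000020) == 0x000120);	/* wrapped past 2^24 */
	return 0;
}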
diff --git a/tools/power/cpupower/debug/x86_64/Makefile b/tools/power/cpupower/debug/x86_64/Makefile
new file mode 100644
index 00000000000..dbf13998462
--- /dev/null
+++ b/tools/power/cpupower/debug/x86_64/Makefile
@@ -0,0 +1,14 @@
+default: all
+
+centrino-decode: centrino-decode.c
+ $(CC) $(CFLAGS) -o centrino-decode centrino-decode.c
+
+powernow-k8-decode: powernow-k8-decode.c
+ $(CC) $(CFLAGS) -o powernow-k8-decode powernow-k8-decode.c
+
+all: centrino-decode powernow-k8-decode
+
+clean:
+ rm -rf centrino-decode powernow-k8-decode
+
+.PHONY: all default clean
diff --git a/tools/power/cpupower/debug/x86_64/centrino-decode.c b/tools/power/cpupower/debug/x86_64/centrino-decode.c
new file mode 120000
index 00000000000..26fb3f1d8fc
--- /dev/null
+++ b/tools/power/cpupower/debug/x86_64/centrino-decode.c
@@ -0,0 +1 @@
+../i386/centrino-decode.c \ No newline at end of file
diff --git a/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c b/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c
new file mode 120000
index 00000000000..eb30c79cf9d
--- /dev/null
+++ b/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c
@@ -0,0 +1 @@
+../i386/powernow-k8-decode.c \ No newline at end of file
diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c
new file mode 100644
index 00000000000..d961101d1ce
--- /dev/null
+++ b/tools/power/cpupower/lib/cpufreq.c
@@ -0,0 +1,208 @@
+/*
+ * (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ */
+
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "cpufreq.h"
+#include "sysfs.h"
+
+int cpufreq_cpu_exists(unsigned int cpu)
+{
+ return sysfs_cpu_exists(cpu);
+}
+
+unsigned long cpufreq_get_freq_kernel(unsigned int cpu)
+{
+ return sysfs_get_freq_kernel(cpu);
+}
+
+unsigned long cpufreq_get_freq_hardware(unsigned int cpu)
+{
+ return sysfs_get_freq_hardware(cpu);
+}
+
+unsigned long cpufreq_get_transition_latency(unsigned int cpu)
+{
+ return sysfs_get_freq_transition_latency(cpu);
+}
+
+int cpufreq_get_hardware_limits(unsigned int cpu,
+ unsigned long *min,
+ unsigned long *max)
+{
+ if ((!min) || (!max))
+ return -EINVAL;
+ return sysfs_get_freq_hardware_limits(cpu, min, max);
+}
+
+char *cpufreq_get_driver(unsigned int cpu)
+{
+ return sysfs_get_freq_driver(cpu);
+}
+
+void cpufreq_put_driver(char *ptr)
+{
+ if (!ptr)
+ return;
+ free(ptr);
+}
+
+struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu)
+{
+ return sysfs_get_freq_policy(cpu);
+}
+
+void cpufreq_put_policy(struct cpufreq_policy *policy)
+{
+ if ((!policy) || (!policy->governor))
+ return;
+
+ free(policy->governor);
+ policy->governor = NULL;
+ free(policy);
+}
+
+struct cpufreq_available_governors *cpufreq_get_available_governors(unsigned
+ int cpu)
+{
+ return sysfs_get_freq_available_governors(cpu);
+}
+
+void cpufreq_put_available_governors(struct cpufreq_available_governors *any)
+{
+ struct cpufreq_available_governors *tmp, *next;
+
+ if (!any)
+ return;
+
+ tmp = any->first;
+ while (tmp) {
+ next = tmp->next;
+ if (tmp->governor)
+ free(tmp->governor);
+ free(tmp);
+ tmp = next;
+ }
+}
+
+
+struct cpufreq_available_frequencies
+*cpufreq_get_available_frequencies(unsigned int cpu)
+{
+ return sysfs_get_available_frequencies(cpu);
+}
+
+void cpufreq_put_available_frequencies(struct cpufreq_available_frequencies
+ *any) {
+ struct cpufreq_available_frequencies *tmp, *next;
+
+ if (!any)
+ return;
+
+ tmp = any->first;
+ while (tmp) {
+ next = tmp->next;
+ free(tmp);
+ tmp = next;
+ }
+}
+
+
+struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned int cpu)
+{
+ return sysfs_get_freq_affected_cpus(cpu);
+}
+
+void cpufreq_put_affected_cpus(struct cpufreq_affected_cpus *any)
+{
+ struct cpufreq_affected_cpus *tmp, *next;
+
+ if (!any)
+ return;
+
+ tmp = any->first;
+ while (tmp) {
+ next = tmp->next;
+ free(tmp);
+ tmp = next;
+ }
+}
+
+
+struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned int cpu)
+{
+ return sysfs_get_freq_related_cpus(cpu);
+}
+
+void cpufreq_put_related_cpus(struct cpufreq_affected_cpus *any)
+{
+ cpufreq_put_affected_cpus(any);
+}
+
+
+int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy)
+{
+ if (!policy || !(policy->governor))
+ return -EINVAL;
+
+ return sysfs_set_freq_policy(cpu, policy);
+}
+
+
+int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq)
+{
+ return sysfs_modify_freq_policy_min(cpu, min_freq);
+}
+
+
+int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq)
+{
+ return sysfs_modify_freq_policy_max(cpu, max_freq);
+}
+
+
+int cpufreq_modify_policy_governor(unsigned int cpu, char *governor)
+{
+ if ((!governor) || (strlen(governor) > 19))
+ return -EINVAL;
+
+ return sysfs_modify_freq_policy_governor(cpu, governor);
+}
+
+int cpufreq_set_frequency(unsigned int cpu, unsigned long target_frequency)
+{
+ return sysfs_set_frequency(cpu, target_frequency);
+}
+
+struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
+ unsigned long long *total_time)
+{
+ return sysfs_get_freq_stats(cpu, total_time);
+}
+
+void cpufreq_put_stats(struct cpufreq_stats *any)
+{
+ struct cpufreq_stats *tmp, *next;
+
+ if (!any)
+ return;
+
+ tmp = any->first;
+ while (tmp) {
+ next = tmp->next;
+ free(tmp);
+ tmp = next;
+ }
+}
+
+unsigned long cpufreq_get_transitions(unsigned int cpu)
+{
+ return sysfs_get_freq_transitions(cpu);
+}
diff --git a/tools/power/cpupower/lib/cpufreq.h b/tools/power/cpupower/lib/cpufreq.h
new file mode 100644
index 00000000000..3aae8e7a083
--- /dev/null
+++ b/tools/power/cpupower/lib/cpufreq.h
@@ -0,0 +1,223 @@
+/*
+ * cpufreq.h - definitions for libcpufreq
+ *
+ * Copyright (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _CPUFREQ_H
+#define _CPUFREQ_H 1
+
+struct cpufreq_policy {
+ unsigned long min;
+ unsigned long max;
+ char *governor;
+};
+
+struct cpufreq_available_governors {
+ char *governor;
+ struct cpufreq_available_governors *next;
+ struct cpufreq_available_governors *first;
+};
+
+struct cpufreq_available_frequencies {
+ unsigned long frequency;
+ struct cpufreq_available_frequencies *next;
+ struct cpufreq_available_frequencies *first;
+};
+
+
+struct cpufreq_affected_cpus {
+ unsigned int cpu;
+ struct cpufreq_affected_cpus *next;
+ struct cpufreq_affected_cpus *first;
+};
+
+struct cpufreq_stats {
+ unsigned long frequency;
+ unsigned long long time_in_state;
+ struct cpufreq_stats *next;
+ struct cpufreq_stats *first;
+};
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * returns 0 if the specified CPU is present (it doesn't say
+ * whether it is online!), and an error value if not.
+ */
+
+extern int cpufreq_cpu_exists(unsigned int cpu);
+
+/* determine current CPU frequency
+ * - _kernel variant means kernel's opinion of CPU frequency
+ * - _hardware variant means actual hardware CPU frequency,
+ * which is only available to root.
+ *
+ * returns 0 on failure, else frequency in kHz.
+ */
+
+extern unsigned long cpufreq_get_freq_kernel(unsigned int cpu);
+
+extern unsigned long cpufreq_get_freq_hardware(unsigned int cpu);
+
+#define cpufreq_get(cpu) cpufreq_get_freq_kernel(cpu)
+
+
+/* determine CPU transition latency
+ *
+ * returns 0 on failure, else transition latency in 10^(-9) s = nanoseconds
+ */
+extern unsigned long cpufreq_get_transition_latency(unsigned int cpu);
+
+
+/* determine hardware CPU frequency limits
+ *
+ * These may be limited further by thermal, energy or other
+ * considerations by cpufreq policy notifiers in the kernel.
+ */
+
+extern int cpufreq_get_hardware_limits(unsigned int cpu,
+ unsigned long *min,
+ unsigned long *max);
+
+
+/* determine CPUfreq driver used
+ *
+ * Remember to call cpufreq_put_driver when the result is no longer
+ * needed, to avoid memory leaks.
+ */
+
+extern char *cpufreq_get_driver(unsigned int cpu);
+
+extern void cpufreq_put_driver(char *ptr);
+
+
+/* determine CPUfreq policy currently used
+ *
+ * Remember to call cpufreq_put_policy when the result is no longer
+ * needed, to avoid memory leaks.
+ */
+
+
+extern struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu);
+
+extern void cpufreq_put_policy(struct cpufreq_policy *policy);
+
+
+/* determine CPUfreq governors currently available
+ *
+ * The list may be modified by modprobe'ing or rmmod'ing other governors.
+ * Please free the allocated memory by calling
+ * cpufreq_put_available_governors after use.
+ */
+
+
+extern struct cpufreq_available_governors
+*cpufreq_get_available_governors(unsigned int cpu);
+
+extern void cpufreq_put_available_governors(
+ struct cpufreq_available_governors *first);
+
+
+/* determine CPU frequency states available
+ *
+ * Only present on _some_ ->target() cpufreq drivers. For information purposes
+ * only. Please free allocated memory by calling
+ * cpufreq_put_available_frequencies after use.
+ */
+
+extern struct cpufreq_available_frequencies
+*cpufreq_get_available_frequencies(unsigned int cpu);
+
+extern void cpufreq_put_available_frequencies(
+ struct cpufreq_available_frequencies *first);
+
+
+/* determine affected CPUs
+ *
+ * Remember to call cpufreq_put_affected_cpus when the result is no longer
+ * needed, to avoid memory leaks.
+ */
+
+extern struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned
+ int cpu);
+
+extern void cpufreq_put_affected_cpus(struct cpufreq_affected_cpus *first);
+
+
+/* determine related CPUs
+ *
+ * Remember to call cpufreq_put_related_cpus when the result is no longer
+ * needed, to avoid memory leaks.
+ */
+
+extern struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned
+ int cpu);
+
+extern void cpufreq_put_related_cpus(struct cpufreq_affected_cpus *first);
+
+
+/* determine stats for cpufreq subsystem
+ *
+ * This is not available in all kernel versions or configurations.
+ */
+
+extern struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
+ unsigned long long *total_time);
+
+extern void cpufreq_put_stats(struct cpufreq_stats *stats);
+
+extern unsigned long cpufreq_get_transitions(unsigned int cpu);
+
+
+/* set new cpufreq policy
+ *
+ * Tries to set the passed policy as the new policy, as closely as possible,
+ * but results may differ depending e.g. on which governors are available.
+ */
+
+extern int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy);
+
+
+/* modify a policy by only changing min/max freq or governor
+ *
+ * Does not check whether result is what was intended.
+ */
+
+extern int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq);
+extern int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq);
+extern int cpufreq_modify_policy_governor(unsigned int cpu, char *governor);
+
+
+/* set a specific frequency
+ *
+ * Only works if the userspace governor can be used and no external
+ * interference (other calls to this function or to set/modify_policy)
+ * occurs. Also does not work on ->range() cpufreq drivers.
+ */
+
+extern int cpufreq_set_frequency(unsigned int cpu,
+ unsigned long target_frequency);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CPUFREQ_H */
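
A minimal sketch of how a program might consume the libcpufreq API declared above (assumes it is built and linked against the library added by this patch; error handling is abbreviated):

#include <stdio.h>
#include "cpufreq.h"

int main(void)
{
	unsigned int cpu = 0;
	unsigned long min, max;
	struct cpufreq_policy *policy;

	if (cpufreq_cpu_exists(cpu) != 0) {
		fprintf(stderr, "cpu%u not present\n", cpu);
		return 1;
	}

	/* kernel's view of the current frequency, in kHz (0 on failure) */
	printf("cpu%u: %lu kHz\n", cpu, cpufreq_get_freq_kernel(cpu));

	if (cpufreq_get_hardware_limits(cpu, &min, &max) == 0)
		printf("hardware limits: %lu - %lu kHz\n", min, max);

	policy = cpufreq_get_policy(cpu);
	if (policy) {
		printf("governor: %s (%lu - %lu kHz)\n",
		       policy->governor, policy->min, policy->max);
		cpufreq_put_policy(policy);	/* frees governor string and struct */
	}
	return 0;
}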
diff --git a/tools/power/cpupower/lib/sysfs.c b/tools/power/cpupower/lib/sysfs.c
new file mode 100644
index 00000000000..870713a75a8
--- /dev/null
+++ b/tools/power/cpupower/lib/sysfs.c
@@ -0,0 +1,672 @@
+/*
+ * (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "cpufreq.h"
+
+#define PATH_TO_CPU "/sys/devices/system/cpu/"
+#define MAX_LINE_LEN 4096
+#define SYSFS_PATH_MAX 255
+
+
+static unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen)
+{
+ int fd;
+ ssize_t numread;
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ return 0;
+
+ numread = read(fd, buf, buflen - 1);
+ if (numread < 1) {
+ close(fd);
+ return 0;
+ }
+
+ buf[numread] = '\0';
+ close(fd);
+
+ return (unsigned int) numread;
+}
+
+
+/* CPUFREQ sysfs access **************************************************/
+
+/* helper function to read file from /sys into given buffer */
+/* fname is a relative path under "cpuX/cpufreq" dir */
+static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
+ char *buf, size_t buflen)
+{
+ char path[SYSFS_PATH_MAX];
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
+ cpu, fname);
+ return sysfs_read_file(path, buf, buflen);
+}
+
+/* helper function to write a new value to a /sys file */
+/* fname is a relative path under "cpuX/cpufreq" dir */
+static unsigned int sysfs_cpufreq_write_file(unsigned int cpu,
+ const char *fname,
+ const char *value, size_t len)
+{
+ char path[SYSFS_PATH_MAX];
+ int fd;
+ ssize_t numwrite;
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
+ cpu, fname);
+
+ fd = open(path, O_WRONLY);
+ if (fd == -1)
+ return 0;
+
+ numwrite = write(fd, value, len);
+ if (numwrite < 1) {
+ close(fd);
+ return 0;
+ }
+
+ close(fd);
+
+ return (unsigned int) numwrite;
+}
+
+/* read access to files which contain one numeric value */
+
+enum cpufreq_value {
+ CPUINFO_CUR_FREQ,
+ CPUINFO_MIN_FREQ,
+ CPUINFO_MAX_FREQ,
+ CPUINFO_LATENCY,
+ SCALING_CUR_FREQ,
+ SCALING_MIN_FREQ,
+ SCALING_MAX_FREQ,
+ STATS_NUM_TRANSITIONS,
+ MAX_CPUFREQ_VALUE_READ_FILES
+};
+
+static const char *cpufreq_value_files[MAX_CPUFREQ_VALUE_READ_FILES] = {
+ [CPUINFO_CUR_FREQ] = "cpuinfo_cur_freq",
+ [CPUINFO_MIN_FREQ] = "cpuinfo_min_freq",
+ [CPUINFO_MAX_FREQ] = "cpuinfo_max_freq",
+ [CPUINFO_LATENCY] = "cpuinfo_transition_latency",
+ [SCALING_CUR_FREQ] = "scaling_cur_freq",
+ [SCALING_MIN_FREQ] = "scaling_min_freq",
+ [SCALING_MAX_FREQ] = "scaling_max_freq",
+ [STATS_NUM_TRANSITIONS] = "stats/total_trans"
+};
+
+
+static unsigned long sysfs_cpufreq_get_one_value(unsigned int cpu,
+ enum cpufreq_value which)
+{
+ unsigned long value;
+ unsigned int len;
+ char linebuf[MAX_LINE_LEN];
+ char *endp;
+
+ if (which >= MAX_CPUFREQ_VALUE_READ_FILES)
+ return 0;
+
+ len = sysfs_cpufreq_read_file(cpu, cpufreq_value_files[which],
+ linebuf, sizeof(linebuf));
+
+ if (len == 0)
+ return 0;
+
+ value = strtoul(linebuf, &endp, 0);
+
+ if (endp == linebuf || errno == ERANGE)
+ return 0;
+
+ return value;
+}
+
+/* read access to files which contain one string */
+
+enum cpufreq_string {
+ SCALING_DRIVER,
+ SCALING_GOVERNOR,
+ MAX_CPUFREQ_STRING_FILES
+};
+
+static const char *cpufreq_string_files[MAX_CPUFREQ_STRING_FILES] = {
+ [SCALING_DRIVER] = "scaling_driver",
+ [SCALING_GOVERNOR] = "scaling_governor",
+};
+
+
+static char *sysfs_cpufreq_get_one_string(unsigned int cpu,
+ enum cpufreq_string which)
+{
+ char linebuf[MAX_LINE_LEN];
+ char *result;
+ unsigned int len;
+
+ if (which >= MAX_CPUFREQ_STRING_FILES)
+ return NULL;
+
+ len = sysfs_cpufreq_read_file(cpu, cpufreq_string_files[which],
+ linebuf, sizeof(linebuf));
+ if (len == 0)
+ return NULL;
+
+ result = strdup(linebuf);
+ if (result == NULL)
+ return NULL;
+
+ if (result[strlen(result) - 1] == '\n')
+ result[strlen(result) - 1] = '\0';
+
+ return result;
+}
+
+/* write access */
+
+enum cpufreq_write {
+ WRITE_SCALING_MIN_FREQ,
+ WRITE_SCALING_MAX_FREQ,
+ WRITE_SCALING_GOVERNOR,
+ WRITE_SCALING_SET_SPEED,
+ MAX_CPUFREQ_WRITE_FILES
+};
+
+static const char *cpufreq_write_files[MAX_CPUFREQ_WRITE_FILES] = {
+ [WRITE_SCALING_MIN_FREQ] = "scaling_min_freq",
+ [WRITE_SCALING_MAX_FREQ] = "scaling_max_freq",
+ [WRITE_SCALING_GOVERNOR] = "scaling_governor",
+ [WRITE_SCALING_SET_SPEED] = "scaling_setspeed",
+};
+
+static int sysfs_cpufreq_write_one_value(unsigned int cpu,
+ enum cpufreq_write which,
+ const char *new_value, size_t len)
+{
+ if (which >= MAX_CPUFREQ_WRITE_FILES)
+ return 0;
+
+ if (sysfs_cpufreq_write_file(cpu, cpufreq_write_files[which],
+ new_value, len) != len)
+ return -ENODEV;
+
+ return 0;
+};
+
+unsigned long sysfs_get_freq_kernel(unsigned int cpu)
+{
+ return sysfs_cpufreq_get_one_value(cpu, SCALING_CUR_FREQ);
+}
+
+unsigned long sysfs_get_freq_hardware(unsigned int cpu)
+{
+ return sysfs_cpufreq_get_one_value(cpu, CPUINFO_CUR_FREQ);
+}
+
+unsigned long sysfs_get_freq_transition_latency(unsigned int cpu)
+{
+ return sysfs_cpufreq_get_one_value(cpu, CPUINFO_LATENCY);
+}
+
+int sysfs_get_freq_hardware_limits(unsigned int cpu,
+ unsigned long *min,
+ unsigned long *max)
+{
+ if ((!min) || (!max))
+ return -EINVAL;
+
+ *min = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MIN_FREQ);
+ if (!*min)
+ return -ENODEV;
+
+ *max = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MAX_FREQ);
+ if (!*max)
+ return -ENODEV;
+
+ return 0;
+}
+
+char *sysfs_get_freq_driver(unsigned int cpu)
+{
+ return sysfs_cpufreq_get_one_string(cpu, SCALING_DRIVER);
+}
+
+struct cpufreq_policy *sysfs_get_freq_policy(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+
+ policy = malloc(sizeof(struct cpufreq_policy));
+ if (!policy)
+ return NULL;
+
+ policy->governor = sysfs_cpufreq_get_one_string(cpu, SCALING_GOVERNOR);
+ if (!policy->governor) {
+ free(policy);
+ return NULL;
+ }
+ policy->min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ);
+ policy->max = sysfs_cpufreq_get_one_value(cpu, SCALING_MAX_FREQ);
+ if ((!policy->min) || (!policy->max)) {
+ free(policy->governor);
+ free(policy);
+ return NULL;
+ }
+
+ return policy;
+}
+
+struct cpufreq_available_governors *
+sysfs_get_freq_available_governors(unsigned int cpu) {
+ struct cpufreq_available_governors *first = NULL;
+ struct cpufreq_available_governors *current = NULL;
+ char linebuf[MAX_LINE_LEN];
+ unsigned int pos, i;
+ unsigned int len;
+
+ len = sysfs_cpufreq_read_file(cpu, "scaling_available_governors",
+ linebuf, sizeof(linebuf));
+ if (len == 0)
+ return NULL;
+
+ pos = 0;
+ for (i = 0; i < len; i++) {
+ if (linebuf[i] == ' ' || linebuf[i] == '\n') {
+ if (i - pos < 2)
+ continue;
+ if (current) {
+ current->next = malloc(sizeof(*current));
+ if (!current->next)
+ goto error_out;
+ current = current->next;
+ } else {
+ first = malloc(sizeof(*first));
+ if (!first)
+ goto error_out;
+ current = first;
+ }
+ current->first = first;
+ current->next = NULL;
+
+ current->governor = malloc(i - pos + 1);
+ if (!current->governor)
+ goto error_out;
+
+ memcpy(current->governor, linebuf + pos, i - pos);
+ current->governor[i - pos] = '\0';
+ pos = i + 1;
+ }
+ }
+
+ return first;
+
+ error_out:
+ while (first) {
+ current = first->next;
+ if (first->governor)
+ free(first->governor);
+ free(first);
+ first = current;
+ }
+ return NULL;
+}
+
+
+struct cpufreq_available_frequencies *
+sysfs_get_available_frequencies(unsigned int cpu) {
+ struct cpufreq_available_frequencies *first = NULL;
+ struct cpufreq_available_frequencies *current = NULL;
+ char one_value[SYSFS_PATH_MAX];
+ char linebuf[MAX_LINE_LEN];
+ unsigned int pos, i;
+ unsigned int len;
+
+ len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies",
+ linebuf, sizeof(linebuf));
+ if (len == 0)
+ return NULL;
+
+ pos = 0;
+ for (i = 0; i < len; i++) {
+ if (linebuf[i] == ' ' || linebuf[i] == '\n') {
+ if (i - pos < 2)
+ continue;
+ if (i - pos >= SYSFS_PATH_MAX)
+ goto error_out;
+ if (current) {
+ current->next = malloc(sizeof(*current));
+ if (!current->next)
+ goto error_out;
+ current = current->next;
+ } else {
+ first = malloc(sizeof(*first));
+ if (!first)
+ goto error_out;
+ current = first;
+ }
+ current->first = first;
+ current->next = NULL;
+
+ memcpy(one_value, linebuf + pos, i - pos);
+ one_value[i - pos] = '\0';
+ if (sscanf(one_value, "%lu", &current->frequency) != 1)
+ goto error_out;
+
+ pos = i + 1;
+ }
+ }
+
+ return first;
+
+ error_out:
+ while (first) {
+ current = first->next;
+ free(first);
+ first = current;
+ }
+ return NULL;
+}
+
+static struct cpufreq_affected_cpus *sysfs_get_cpu_list(unsigned int cpu,
+ const char *file)
+{
+ struct cpufreq_affected_cpus *first = NULL;
+ struct cpufreq_affected_cpus *current = NULL;
+ char one_value[SYSFS_PATH_MAX];
+ char linebuf[MAX_LINE_LEN];
+ unsigned int pos, i;
+ unsigned int len;
+
+ len = sysfs_cpufreq_read_file(cpu, file, linebuf, sizeof(linebuf));
+ if (len == 0)
+ return NULL;
+
+ pos = 0;
+ for (i = 0; i < len; i++) {
+ if (i == len || linebuf[i] == ' ' || linebuf[i] == '\n') {
+ if (i - pos < 1)
+ continue;
+ if (i - pos >= SYSFS_PATH_MAX)
+ goto error_out;
+ if (current) {
+ current->next = malloc(sizeof(*current));
+ if (!current->next)
+ goto error_out;
+ current = current->next;
+ } else {
+ first = malloc(sizeof(*first));
+ if (!first)
+ goto error_out;
+ current = first;
+ }
+ current->first = first;
+ current->next = NULL;
+
+ memcpy(one_value, linebuf + pos, i - pos);
+ one_value[i - pos] = '\0';
+
+ if (sscanf(one_value, "%u", &current->cpu) != 1)
+ goto error_out;
+
+ pos = i + 1;
+ }
+ }
+
+ return first;
+
+ error_out:
+ while (first) {
+ current = first->next;
+ free(first);
+ first = current;
+ }
+ return NULL;
+}
+
+struct cpufreq_affected_cpus *sysfs_get_freq_affected_cpus(unsigned int cpu)
+{
+ return sysfs_get_cpu_list(cpu, "affected_cpus");
+}
+
+struct cpufreq_affected_cpus *sysfs_get_freq_related_cpus(unsigned int cpu)
+{
+ return sysfs_get_cpu_list(cpu, "related_cpus");
+}
+
+struct cpufreq_stats *sysfs_get_freq_stats(unsigned int cpu,
+ unsigned long long *total_time) {
+ struct cpufreq_stats *first = NULL;
+ struct cpufreq_stats *current = NULL;
+ char one_value[SYSFS_PATH_MAX];
+ char linebuf[MAX_LINE_LEN];
+ unsigned int pos, i;
+ unsigned int len;
+
+ len = sysfs_cpufreq_read_file(cpu, "stats/time_in_state",
+ linebuf, sizeof(linebuf));
+ if (len == 0)
+ return NULL;
+
+ *total_time = 0;
+ pos = 0;
+ for (i = 0; i < len; i++) {
+ if (i == strlen(linebuf) || linebuf[i] == '\n') {
+ if (i - pos < 2)
+ continue;
+ if ((i - pos) >= SYSFS_PATH_MAX)
+ goto error_out;
+ if (current) {
+ current->next = malloc(sizeof(*current));
+ if (!current->next)
+ goto error_out;
+ current = current->next;
+ } else {
+ first = malloc(sizeof(*first));
+ if (!first)
+ goto error_out;
+ current = first;
+ }
+ current->first = first;
+ current->next = NULL;
+
+ memcpy(one_value, linebuf + pos, i - pos);
+ one_value[i - pos] = '\0';
+ if (sscanf(one_value, "%lu %llu",
+ &current->frequency,
+ &current->time_in_state) != 2)
+ goto error_out;
+
+ *total_time = *total_time + current->time_in_state;
+ pos = i + 1;
+ }
+ }
+
+ return first;
+
+ error_out:
+ while (first) {
+ current = first->next;
+ free(first);
+ first = current;
+ }
+ return NULL;
+}
+
+unsigned long sysfs_get_freq_transitions(unsigned int cpu)
+{
+ return sysfs_cpufreq_get_one_value(cpu, STATS_NUM_TRANSITIONS);
+}
+
+static int verify_gov(char *new_gov, char *passed_gov)
+{
+ unsigned int i, j = 0;
+
+ if (!passed_gov || (strlen(passed_gov) > 19))
+ return -EINVAL;
+
+ strncpy(new_gov, passed_gov, 20);
+ for (i = 0; i < 20; i++) {
+ if (j) {
+ new_gov[i] = '\0';
+ continue;
+ }
+ if ((new_gov[i] >= 'a') && (new_gov[i] <= 'z'))
+ continue;
+
+ if ((new_gov[i] >= 'A') && (new_gov[i] <= 'Z'))
+ continue;
+
+ if (new_gov[i] == '-')
+ continue;
+
+ if (new_gov[i] == '_')
+ continue;
+
+ if (new_gov[i] == '\0') {
+ j = 1;
+ continue;
+ }
+ return -EINVAL;
+ }
+ new_gov[19] = '\0';
+ return 0;
+}
+
+int sysfs_modify_freq_policy_governor(unsigned int cpu, char *governor)
+{
+ char new_gov[SYSFS_PATH_MAX];
+
+ if (!governor)
+ return -EINVAL;
+
+ if (verify_gov(new_gov, governor))
+ return -EINVAL;
+
+ return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR,
+ new_gov, strlen(new_gov));
+};
+
+int sysfs_modify_freq_policy_max(unsigned int cpu, unsigned long max_freq)
+{
+ char value[SYSFS_PATH_MAX];
+
+ snprintf(value, SYSFS_PATH_MAX, "%lu", max_freq);
+
+ return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
+ value, strlen(value));
+};
+
+
+int sysfs_modify_freq_policy_min(unsigned int cpu, unsigned long min_freq)
+{
+ char value[SYSFS_PATH_MAX];
+
+ snprintf(value, SYSFS_PATH_MAX, "%lu", min_freq);
+
+ return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ,
+ value, strlen(value));
+};
+
+
+int sysfs_set_freq_policy(unsigned int cpu, struct cpufreq_policy *policy)
+{
+ char min[SYSFS_PATH_MAX];
+ char max[SYSFS_PATH_MAX];
+ char gov[SYSFS_PATH_MAX];
+ int ret;
+ unsigned long old_min;
+ int write_max_first;
+
+ if (!policy || !(policy->governor))
+ return -EINVAL;
+
+ if (policy->max < policy->min)
+ return -EINVAL;
+
+ if (verify_gov(gov, policy->governor))
+ return -EINVAL;
+
+ snprintf(min, SYSFS_PATH_MAX, "%lu", policy->min);
+ snprintf(max, SYSFS_PATH_MAX, "%lu", policy->max);
+
+ old_min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ);
+ write_max_first = (old_min && (policy->max < old_min) ? 0 : 1);
+
+ if (write_max_first) {
+ ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
+ max, strlen(max));
+ if (ret)
+ return ret;
+ }
+
+ ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ, min,
+ strlen(min));
+ if (ret)
+ return ret;
+
+ if (!write_max_first) {
+ ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
+ max, strlen(max));
+ if (ret)
+ return ret;
+ }
+
+ return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR,
+ gov, strlen(gov));
+}
+
+int sysfs_set_frequency(unsigned int cpu, unsigned long target_frequency)
+{
+ struct cpufreq_policy *pol = sysfs_get_freq_policy(cpu);
+ char userspace_gov[] = "userspace";
+ char freq[SYSFS_PATH_MAX];
+ int ret;
+
+ if (!pol)
+ return -ENODEV;
+
+ if (strncmp(pol->governor, userspace_gov, 9) != 0) {
+ ret = sysfs_modify_freq_policy_governor(cpu, userspace_gov);
+ if (ret) {
+ cpufreq_put_policy(pol);
+ return ret;
+ }
+ }
+
+ cpufreq_put_policy(pol);
+
+ snprintf(freq, SYSFS_PATH_MAX, "%lu", target_frequency);
+
+ return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_SET_SPEED,
+ freq, strlen(freq));
+}
+
+/* CPUFREQ sysfs access **************************************************/
+
+/* General sysfs access **************************************************/
+int sysfs_cpu_exists(unsigned int cpu)
+{
+ char file[SYSFS_PATH_MAX];
+ struct stat statbuf;
+
+ snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/", cpu);
+
+ if (stat(file, &statbuf) != 0)
+ return -ENOSYS;
+
+ return S_ISDIR(statbuf.st_mode) ? 0 : -ENOSYS;
+}
+
+/* General sysfs access **************************************************/
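
sysfs_set_freq_policy() above orders the two limit writes so the kernel never sees a transient range with min above max: the new max is written first unless it lies below the currently active min. A standalone sketch of that decision, with illustrative names not taken from the patch:

#include <assert.h>

/* Returns 1 if the new max should be written before the new min,
 * 0 if the (lower) min must be written first. */
static int write_max_first(unsigned long old_min, unsigned long new_max)
{
	return (old_min && new_max < old_min) ? 0 : 1;
}

int main(void)
{
	/* raising the range 1.0-1.2 GHz -> 1.8-2.0 GHz: write max first */
	assert(write_max_first(1000000, 2000000) == 1);
	/* lowering the range 1.8-2.0 GHz -> 0.8-1.0 GHz: write min first */
	assert(write_max_first(1800000, 1000000) == 0);
	return 0;
}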
diff --git a/tools/power/cpupower/lib/sysfs.h b/tools/power/cpupower/lib/sysfs.h
new file mode 100644
index 00000000000..c76a5e0af50
--- /dev/null
+++ b/tools/power/cpupower/lib/sysfs.h
@@ -0,0 +1,31 @@
+/* General */
+extern int sysfs_cpu_exists(unsigned int cpu);
+
+/* CPUfreq */
+extern unsigned long sysfs_get_freq_kernel(unsigned int cpu);
+extern unsigned long sysfs_get_freq_hardware(unsigned int cpu);
+extern unsigned long sysfs_get_freq_transition_latency(unsigned int cpu);
+extern int sysfs_get_freq_hardware_limits(unsigned int cpu,
+ unsigned long *min, unsigned long *max);
+extern char *sysfs_get_freq_driver(unsigned int cpu);
+extern struct cpufreq_policy *sysfs_get_freq_policy(unsigned int cpu);
+extern struct cpufreq_available_governors *sysfs_get_freq_available_governors(
+ unsigned int cpu);
+extern struct cpufreq_available_frequencies *sysfs_get_available_frequencies(
+ unsigned int cpu);
+extern struct cpufreq_affected_cpus *sysfs_get_freq_affected_cpus(
+ unsigned int cpu);
+extern struct cpufreq_affected_cpus *sysfs_get_freq_related_cpus(
+ unsigned int cpu);
+extern struct cpufreq_stats *sysfs_get_freq_stats(unsigned int cpu,
+ unsigned long long *total_time);
+extern unsigned long sysfs_get_freq_transitions(unsigned int cpu);
+extern int sysfs_set_freq_policy(unsigned int cpu,
+ struct cpufreq_policy *policy);
+extern int sysfs_modify_freq_policy_min(unsigned int cpu,
+ unsigned long min_freq);
+extern int sysfs_modify_freq_policy_max(unsigned int cpu,
+ unsigned long max_freq);
+extern int sysfs_modify_freq_policy_governor(unsigned int cpu, char *governor);
+extern int sysfs_set_frequency(unsigned int cpu,
+ unsigned long target_frequency);
diff --git a/tools/power/cpupower/man/cpupower-frequency-info.1 b/tools/power/cpupower/man/cpupower-frequency-info.1
new file mode 100644
index 00000000000..3194811d58f
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower-frequency-info.1
@@ -0,0 +1,76 @@
+.TH "cpufreq-info" "1" "0.1" "Mattia Dongili" ""
+.SH "NAME"
+.LP
+cpufreq\-info \- Utility to retrieve cpufreq kernel information
+.SH "SYNTAX"
+.LP
+cpufreq\-info [\fIoptions\fP]
+.SH "DESCRIPTION"
+.LP
+A small tool which prints out cpufreq information helpful to developers and interested users.
+.SH "OPTIONS"
+.LP
+.TP
+\fB\-e\fR \fB\-\-debug\fR
+Prints out debug information.
+.TP
+\fB\-f\fR \fB\-\-freq\fR
+Get frequency the CPU currently runs at, according to the cpufreq core.
+.TP
+\fB\-w\fR \fB\-\-hwfreq\fR
+Get frequency the CPU currently runs at, by reading it from hardware (only available to root).
+.TP
+\fB\-l\fR \fB\-\-hwlimits\fR
+Determine the minimum and maximum CPU frequency allowed.
+.TP
+\fB\-d\fR \fB\-\-driver\fR
+Determines the used cpufreq kernel driver.
+.TP
+\fB\-p\fR \fB\-\-policy\fR
+Gets the currently used cpufreq policy.
+.TP
+\fB\-g\fR \fB\-\-governors\fR
+Determines available cpufreq governors.
+.TP
+\fB\-r\fR \fB\-\-related\-cpus\fR
+Determines which CPUs run at the same hardware frequency.
+.TP
+\fB\-a\fR \fB\-\-affected\-cpus\fR
+Determines which CPUs need to have their frequency coordinated by software.
+.TP
+\fB\-s\fR \fB\-\-stats\fR
+Shows cpufreq statistics if available.
+.TP
+\fB\-y\fR \fB\-\-latency\fR
+Determines the maximum latency on CPU frequency changes.
+.TP
+\fB\-o\fR \fB\-\-proc\fR
+Prints out information in the style of the /proc/cpufreq interface of 2.4 and early 2.6 kernels.
+.TP
+\fB\-m\fR \fB\-\-human\fR
+Human\-readable output for the \-f, \-w, \-s and \-y parameters.
+.TP
+\fB\-h\fR \fB\-\-help\fR
+Prints out the help screen.
+.SH "REMARKS"
+.LP
+By default only values of core zero are displayed. How to display settings of
+other cores is described in the cpupower(1) manpage in the \-\-cpu option section.
+.LP
+You can't specify more than one of the output specific options \-o \-e \-a \-g \-p \-d \-l \-w \-f \-y.
+.LP
+You also can't specify the \-o option combined with the \-c option.
+.SH "FILES"
+.nf
+\fI/sys/devices/system/cpu/cpu*/cpufreq/\fP
+\fI/proc/cpufreq\fP (deprecated)
+\fI/proc/sys/cpu/\fP (deprecated)
+.fi
+.SH "AUTHORS"
+.nf
+Dominik Brodowski <linux@brodo.de> \- author
+Mattia Dongili <malattia@gmail.com> \- first autolibtoolization
+.fi
+.SH "SEE ALSO"
+.LP
+cpupower\-frequency\-set(1), cpupower(1)
diff --git a/tools/power/cpupower/man/cpupower-frequency-set.1 b/tools/power/cpupower/man/cpupower-frequency-set.1
new file mode 100644
index 00000000000..26e3e13eee3
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower-frequency-set.1
@@ -0,0 +1,54 @@
+.TH "cpufreq-set" "1" "0.1" "Mattia Dongili" ""
+.SH "NAME"
+.LP
+cpufreq\-set \- A small tool which allows modifying cpufreq settings.
+.SH "SYNTAX"
+.LP
+cpufreq\-set [\fIoptions\fP]
+.SH "DESCRIPTION"
+.LP
+cpufreq\-set allows you to modify cpufreq settings without having to type e.g. "/sys/devices/system/cpu/cpu0/cpufreq/scaling_set_speed" all the time.
+.SH "OPTIONS"
+.LP
+.TP
+\fB\-d\fR \fB\-\-min\fR <FREQ>
+new minimum CPU frequency the governor may select.
+.TP
+\fB\-u\fR \fB\-\-max\fR <FREQ>
+new maximum CPU frequency the governor may select.
+.TP
+\fB\-g\fR \fB\-\-governor\fR <GOV>
+new cpufreq governor.
+.TP
+\fB\-f\fR \fB\-\-freq\fR <FREQ>
+specific frequency to be set. Requires userspace governor to be available and loaded.
+.TP
+\fB\-r\fR \fB\-\-related\fR
+modify all hardware-related CPUs at the same time
+.TP
+\fB\-h\fR \fB\-\-help\fR
+Prints out the help screen.
+.SH "REMARKS"
+.LP
+By default values are applied on all cores. How to modify single core
+configurations is described in the cpupower(1) manpage in the \-\-cpu option section.
+.LP
+The \-f FREQ, \-\-freq FREQ parameter cannot be combined with any other parameter.
+.LP
+FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz by postfixing the value with the desired unit name, without any space (frequency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000). For example, 1.4GHz, 1400MHz and 1400000 all denote the same frequency.
+.LP
+On Linux kernels up to 2.6.29, the \-r or \-\-related parameter is ignored.
+.SH "FILES"
+.nf
+\fI/sys/devices/system/cpu/cpu*/cpufreq/\fP
+\fI/proc/cpufreq\fP (deprecated)
+\fI/proc/sys/cpu/\fP (deprecated)
+.fi
+.SH "AUTHORS"
+.nf
+Dominik Brodowski <linux@brodo.de> \- author
+Mattia Dongili <malattia@gmail.com> \- first autolibtoolization
+.fi
+.SH "SEE ALSO"
+.LP
+cpupower\-frequency\-info(1), cpupower(1)
diff --git a/tools/power/cpupower/man/cpupower-info.1 b/tools/power/cpupower/man/cpupower-info.1
new file mode 100644
index 00000000000..58e21196f17
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower-info.1
@@ -0,0 +1,19 @@
+.TH CPUPOWER\-INFO "1" "22/02/2011" "" "cpupower Manual"
+.SH NAME
+cpupower\-info \- Shows processor power related kernel or hardware configurations
+.SH SYNOPSIS
+.ft B
+.B cpupower info [ \-b ] [ \-s ] [ \-m ]
+
+.SH DESCRIPTION
+\fBcpupower info \fP shows kernel configurations or processor hardware
+registers affecting processor power saving policies.
+
+Some options are platform wide, some affect single cores. By default, only
+values of core zero are displayed. "cpupower --cpu all cpuinfo" will show the
+settings of all cores; see cpupower(1) for how to choose specific cores.
+
+.SH "SEE ALSO"
+Options are described in detail in:
+
+cpupower(1), cpupower-set(1)
diff --git a/tools/power/cpupower/man/cpupower-monitor.1 b/tools/power/cpupower/man/cpupower-monitor.1
new file mode 100644
index 00000000000..d5cfa265c3d
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower-monitor.1
@@ -0,0 +1,179 @@
+.TH CPUPOWER\-MONITOR "1" "22/02/2011" "" "cpupower Manual"
+.SH NAME
+cpupower\-monitor \- Report processor frequency and idle statistics
+.SH SYNOPSIS
+.ft B
+.B cpupower monitor
+.RB "\-l"
+
+.B cpupower monitor
+.RB [ "\-m <mon1>," [ "<mon2>,..." ] ]
+.RB [ "\-i seconds" ]
+.br
+.B cpupower monitor
+.RB [ "\-m <mon1>," [ "<mon2>,..." ] ]
+.RB command
+.br
+.SH DESCRIPTION
+\fBcpupower-monitor \fP reports processor topology, frequency and idle power
+state statistics. Either \fBcommand\fP is forked and
+statistics are printed upon its completion, or statistics are printed periodically.
+
+\fBcpupower-monitor \fP implements independent processor sleep state and
+frequency counters. Some are retrieved from kernel statistics, some read
+out hardware registers directly. Use \-l to get an overview of which monitors
+are supported on your system.
+
+.SH Options
+.PP
+\-l
+.RS 4
+List available monitors on your system. Additional details about each monitor
+are shown:
+.RS 2
+.IP \(bu
+The name in quotation marks which can be passed to the \-m parameter.
+.IP \(bu
+The number of different counters the monitor supports in brackets.
+.IP \(bu
+The amount of time in seconds after which the counters might overflow, due to
+implementation constraints.
+.IP \(bu
+The name and a description of each counter and its processor hierarchy level
+coverage in square brackets:
+.RS 4
+.IP \(bu
+[T] \-> Thread
+.IP \(bu
+[C] \-> Core
+.IP \(bu
+[P] \-> Processor Package (Socket)
+.IP \(bu
+[M] \-> Machine/Platform wide counter
+.RE
+.RE
+.RE
+.PP
+\-m <mon1>,<mon2>,...
+.RS 4
+Only display specific monitors. Use the monitor string(s) provided by \-l option.
+.RE
+.PP
+\-i seconds
+.RS 4
+Measurement interval in seconds.
+.RE
+.PP
+command
+.RS 4
+Measure idle and frequency characteristics of an arbitrary command/workload.
+The executable \fBcommand\fP is forked and upon its exit, statistics gathered since it was
+forked are displayed.
+.RE
+.PP
+\-v
+.RS 4
+Increase verbosity if the binary was compiled with the DEBUG option set.
+.RE
+
+.SH MONITOR DESCRIPTIONS
+.SS "Idle_Stats"
+Shows statistics of the cpuidle kernel subsystem. Values are retrieved from
+/sys/devices/system/cpu/cpu*/cpuidle/state*/.
+The kernel updates these values every time an idle state is entered or
+left. Therefore there can be some inaccuracy when cores are in an idle
+state for some time around the start or end of the measurement. In the worst
+case a core stays in an idle state for the whole measurement time and the idle
+state usage time exported by the kernel does not get updated. In this case
+a state residency of 0 percent is shown although it was actually 100 percent.
+
+.SS "Mperf"
+The name comes from the aperf/mperf (average and maximum) MSR registers,
+which are available on recent X86 processors. It shows the average frequency
+(including boost frequencies).
+Because on all recent hardware the mperf timer stops ticking in any idle
+state, it is also used to show C0 (processor is active) and Cx (processor is in
+any sleep state) times. These counters do not have the inaccuracy restrictions
+the "Idle_Stats" counters may show.
+May work poorly on Linux 2.6.20 through 2.6.29, as the \fBacpi-cpufreq \fP
+kernel frequency driver periodically cleared the aperf/mperf registers in those
+kernels.
+
+.SS "Nehalem" "SandyBridge"
+Intel Core and Package sleep state counters.
+Threads (hyperthreaded cores) may not be able to enter deeper core states if
+their sibling is utilized.
+Deepest package sleep states may in reality show up as machine/platform wide
+sleep states and can only be entered if all cores are idle. Look up Intel
+manuals (some are provided in the References section) for further details.
+
+.SS "Ontario" "Liano"
+AMD laptop and desktop processor (family 12h and 14h) sleep state counters.
+The registers are accessed via PCI and can therefore still be read out even
+when cores have been offlined.
+
+There is one special counter: NBP1 (North Bridge P1).
+This one always returns 0 or 1, depending on whether the North Bridge P1
+power state was entered at least once during the measurement time.
+Being able to enter the NBP1 state also depends on graphics power management.
+Therefore this counter can be used to verify whether the graphics driver's
+power management is working as expected.
+
+.SH EXAMPLES
+
+"cpupower monitor \-l" may show:
+.RS 4
+Monitor "Mperf" (3 states) \- Might overflow after 922000000 s
+
+ ...
+
+Monitor "Idle_Stats" (3 states) \- Might overflow after 4294967295 s
+
+ ...
+
+.RE
+cpupower monitor \-m "Idle_Stats,Mperf" scp /tmp/test /nfs/tmp
+
+Monitors the scp command and shows both the Idle_Stats and Mperf counter
+statistics, in the order passed via \-m.
+
+
+
+.RE
+Be careful: the typical command to fully utilize one CPU,
+
+cpupower monitor cat /dev/zero >/dev/null
+
+does not work as expected, because the monitored output is redirected to
+/dev/null. This can be worked around by putting the line into its own tiny
+shell script. Hit CTRL\-c to terminate the command and get the measurement
+output displayed.
+
+.SH REFERENCES
+"BIOS and Kernel Developer’s Guide (BKDG) for AMD Family 14h Processors"
+http://support.amd.com/us/Processor_TechDocs/43170.pdf
+
+"Intel® Turbo Boost Technology
+in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
+http://download.intel.com/design/processor/applnots/320354.pdf
+
+"Intel® 64 and IA-32 Architectures Software Developer's Manual
+Volume 3B: System Programming Guide"
+http://www.intel.com/products/processor/manuals
+
+.SH FILES
+.ta
+.nf
+/dev/cpu/*/msr
+/sys/devices/system/cpu/cpu*/cpuidle/state*/.
+.fi
+
+.SH "SEE ALSO"
+powertop(8), msr(4), vmstat(8)
+.PP
+.SH AUTHORS
+.nf
+Written by Thomas Renninger <trenn@suse.de>
+
+Nehalem, SandyBridge monitors and command passing
+based on turbostat.8 from Len Brown <len.brown@intel.com>
diff --git a/tools/power/cpupower/man/cpupower-set.1 b/tools/power/cpupower/man/cpupower-set.1
new file mode 100644
index 00000000000..c4954a9fe4e
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower-set.1
@@ -0,0 +1,103 @@
+.TH CPUPOWER\-SET "1" "22/02/2011" "" "cpupower Manual"
+.SH NAME
+cpupower\-set \- Set processor power related kernel or hardware configurations
+.SH SYNOPSIS
+.ft B
+.B cpupower set [ \-b VAL ] [ \-s VAL ] [ \-m VAL ]
+
+
+.SH DESCRIPTION
+\fBcpupower set \fP sets kernel configurations or directly accesses hardware
+registers affecting processor power saving policies.
+
+Some options are platform wide, some affect single cores. By default values
+are applied on all cores. How to modify single core configurations is
+described in the cpupower(1) manpage in the \-\-cpu option section. Whether an
+option affects the whole system or can be applied to individual cores is
+described in the Options sections.
+
+Use \fBcpupower info \fP to read out current settings and whether they are
+supported on the system at all.
+
+.SH Options
+.PP
+\-\-perf-bias, \-b
+.RS 4
+Sets a register on supported Intel processors which allows software to convey
+its policy for the relative importance of performance versus energy savings to
+the processor.
+
+The range of valid numbers is 0-15, where 0 is maximum
+performance and 15 is maximum energy efficiency.
+
+The processor uses this information in model-specific ways
+when it must select trade-offs between performance and
+energy efficiency.
+
+This policy hint does not supersede Processor Performance states
+(P-states) or CPU Idle power states (C-states), but allows
+software to have influence where it would otherwise be unable
+to express a preference.
+
+For example, this setting may tell the hardware how
+aggressively or conservatively to control frequency
+in the "turbo range" above the explicitly OS-controlled
+P-state frequency range. It may also tell the hardware
+how aggressively it should enter the OS requested C-states.
+
+This option can be applied to individual cores only via the \-\-cpu option,
+cpupower(1).
+
+Setting the performance bias value on one CPU can modify the setting on
+related CPUs as well (for example all CPUs on one socket), because of
+hardware restrictions.
+Use \fBcpupower -c all info -b\fP to verify.
+
+This option needs the msr kernel driver (CONFIG_X86_MSR) loaded.
+.RE
+.PP
+\-\-sched\-mc, \-m [ VAL ]
+.RE
+\-\-sched\-smt, \-s [ VAL ]
+.RS 4
+\-\-sched\-mc utilizes cores in one processor package/socket first before
+processes are scheduled to other processor packages/sockets.
+
+\-\-sched\-smt utilizes thread siblings of one processor core first before
+processes are scheduled to other cores.
+
+The impact on power consumption and performance (positive or negative) heavily
+depends on processor support for deep sleep states, frequency scaling and
+frequency boost modes, and on their dependencies between other thread siblings
+and processor cores.
+
+Taken from the kernel documentation:
+
+Adjust the kernel's multi-core scheduler support.
+
+Possible values are:
+.RS 2
+0 - No power saving load balance (default value)
+
+1 - Fill one thread/core/package first for long running threads
+
+2 - Also bias task wakeups to semi-idle cpu package for power
+savings
+.RE
+
+sched_mc_power_savings is dependent upon SCHED_MC, which is
+itself architecture dependent.
+
+sched_smt_power_savings is dependent upon SCHED_SMT, which
+is itself architecture dependent.
+
+The two files are independent of each other. It is possible
+that one file may be present without the other.
+
+.SH "SEE ALSO"
+cpupower-info(1), cpupower-monitor(1), powertop(1)
+.PP
+.SH AUTHORS
+.nf
+\-\-perf\-bias parts written by Len Brown <len.brown@intel.com>
+Thomas Renninger <trenn@suse.de>
diff --git a/tools/power/cpupower/man/cpupower.1 b/tools/power/cpupower/man/cpupower.1
new file mode 100644
index 00000000000..78c20feab85
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower.1
@@ -0,0 +1,72 @@
+.TH CPUPOWER "1" "07/03/2011" "" "cpupower Manual"
+.SH NAME
+cpupower \- Shows and sets processor power related values
+.SH SYNOPSIS
+.ft B
+.B cpupower [ \-c cpulist ] subcommand [ARGS]
+
+.B cpupower \-v|\-\-version
+
+.B cpupower \-h|\-\-help
+
+.SH DESCRIPTION
+\fBcpupower \fP is a collection of tools to examine and tune power saving
+related features of your processor.
+
+The manpages of the subcommands (cpupower\-<subcommand>(1)) provide detailed
+descriptions of supported features. Run \fBcpupower help\fP to get an overview
+of supported subcommands.
+
+.SH Options
+.PP
+\-\-help, \-h
+.RS 4
+Shows supported subcommands and general usage.
+.RE
+.PP
+\-\-cpu cpulist, \-c cpulist
+.RS 4
+Only show or set values for specific cores.
+This option is not supported by all subcommands, details can be found in the
+manpages of the subcommands.
+
+Some subcommands access all cores (typically the *\-set commands), some only
+the first core (typically the *\-info commands) by default.
+
+The syntax for <cpulist> is based on how the kernel exports CPU bitmasks via
+sysfs files. Some examples:
+.RS 4
+.TP 16
+Input
+Equivalent to
+.TP
+all
+all cores
+.TP
+0\-3
+0,1,2,3
+.TP
+0\-7:2
+0,2,4,6
+.TP
+1,3,5-7
+1,3,5,6,7
+.TP
+0\-3:2,8\-15:4
+0,2,8,12
+.RE
+.RE
+.PP
+\-\-version, \-v
+.RS 4
+Print the package name and version number.
+
+.SH "SEE ALSO"
+cpupower-set(1), cpupower-info(1), cpupower-idle(1),
+cpupower-frequency-set(1), cpupower-frequency-info(1), cpupower-monitor(1),
+powertop(1)
+.PP
+.SH AUTHORS
+.nf
+\-\-perf\-bias parts written by Len Brown <len.brown@intel.com>
+Thomas Renninger <trenn@suse.de>
diff --git a/tools/power/cpupower/po/cs.po b/tools/power/cpupower/po/cs.po
new file mode 100644
index 00000000000..cb22c45c506
--- /dev/null
+++ b/tools/power/cpupower/po/cs.po
@@ -0,0 +1,944 @@
+# translation of cs.po to Czech
+# Czech translation for cpufrequtils package
+# Czech messages for cpufrequtils.
+# Copyright (C) 2007 kavol
+# This file is distributed under the same license as the cpufrequtils package.
+#
+# Karel Volný <kavol@seznam.cz>, 2007, 2008.
+msgid ""
+msgstr ""
+"Project-Id-Version: cs\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2011-03-08 17:03+0100\n"
+"PO-Revision-Date: 2008-06-11 16:26+0200\n"
+"Last-Translator: Karel Volný <kavol@seznam.cz>\n"
+"Language-Team: Czech <diskuze@lists.l10n.cz>\n"
+"Language: cs\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
+"X-Generator: KBabel 1.11.4\n"
+
+#: utils/idle_monitor/nhm_idle.c:36
+msgid "Processor Core C3"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:43
+msgid "Processor Core C6"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:51
+msgid "Processor Package C3"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:58 utils/idle_monitor/amd_fam14h_idle.c:70
+msgid "Processor Package C6"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:33
+msgid "Processor Core C7"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:40
+msgid "Processor Package C2"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:47
+msgid "Processor Package C7"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:56
+msgid "Package in sleep state (PC1 or deeper)"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:63
+msgid "Processor Package C1"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:77
+msgid "North Bridge P1 boolean counter (returns 0 or 1)"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:35
+msgid "Processor Core not idle"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:42
+msgid "Processor Core in an idle state"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:50
+msgid "Average Frequency (including boost) in MHz"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:66
+#, c-format
+msgid ""
+"cpupower monitor: [-h] [ [-t] | [-l] | [-m <mon1>,[<mon2>] ] ] [-i "
+"interval_sec | -c command ...]\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:69
+#, c-format
+msgid ""
+"cpupower monitor: [-v] [-h] [ [-t] | [-l] | [-m <mon1>,[<mon2>] ] ] [-i "
+"interval_sec | -c command ...]\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:71
+#, c-format
+msgid "\t -v: be more verbose\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:73
+#, c-format
+msgid "\t -h: print this help\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:74
+#, c-format
+msgid "\t -i: time intervall to measure for in seconds (default 1)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:75
+#, c-format
+msgid "\t -t: show CPU topology/hierarchy\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:76
+#, c-format
+msgid "\t -l: list available CPU sleep monitors (for use with -m)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:77
+#, c-format
+msgid "\t -m: show specific CPU sleep monitors only (in same order)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:79
+#, c-format
+msgid ""
+"only one of: -t, -l, -m are allowed\n"
+"If none of them is passed,"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:80
+#, c-format
+msgid " all supported monitors are shown\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:197
+#, c-format
+msgid "Monitor %s, Counter %s has no count function. Implementation error\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:207
+#, c-format
+msgid " *is offline\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:236
+#, c-format
+msgid "%s: max monitor name length (%d) exceeded\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:250
+#, c-format
+msgid "No matching monitor found in %s, try -l option\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:266
+#, c-format
+msgid "Monitor \"%s\" (%d states) - Might overflow after %u s\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:319
+#, c-format
+msgid "%s took %.5f seconds and exited with status %d\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:406
+#, c-format
+msgid "Cannot read number of available processors\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:417
+#, c-format
+msgid "Available monitor %s needs root access\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:428
+#, c-format
+msgid "No HW Cstate monitors found\n"
+msgstr ""
+
+#: utils/cpupower.c:78
+#, c-format
+msgid "cpupower [ -c cpulist ] subcommand [ARGS]\n"
+msgstr ""
+
+#: utils/cpupower.c:79
+#, c-format
+msgid "cpupower --version\n"
+msgstr ""
+
+#: utils/cpupower.c:80
+#, c-format
+msgid "Supported subcommands are:\n"
+msgstr ""
+
+#: utils/cpupower.c:83
+#, c-format
+msgid ""
+"\n"
+"Some subcommands can make use of the -c cpulist option.\n"
+msgstr ""
+
+#: utils/cpupower.c:84
+#, c-format
+msgid "Look at the general cpupower manpage how to use it\n"
+msgstr ""
+
+#: utils/cpupower.c:85
+#, c-format
+msgid "and read up the subcommand's manpage whether it is supported.\n"
+msgstr ""
+
+#: utils/cpupower.c:86
+#, c-format
+msgid ""
+"\n"
+"Use cpupower help subcommand for getting help for above subcommands.\n"
+msgstr ""
+
+#: utils/cpupower.c:91
+#, c-format
+msgid "Report errors and bugs to %s, please.\n"
+msgstr ""
+"Chyby v programu prosím hlaste na %s (anglicky).\n"
+"Chyby v pÅ™ekladu prosím hlaste na kavol@seznam.cz (Äesky ;-)\n"
+
+#: utils/cpupower.c:114
+#, c-format
+msgid "Error parsing cpu list\n"
+msgstr ""
+
+#: utils/cpupower.c:172
+#, c-format
+msgid "Subcommand %s needs root privileges\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:31
+#, c-format
+msgid "Couldn't count the number of CPUs (%s: %s), assuming 1\n"
+msgstr "Nelze zjistit poÄet CPU (%s: %s), pÅ™edpokládá se 1.\n"
+
+#: utils/cpufreq-info.c:63
+#, c-format
+msgid ""
+" minimum CPU frequency - maximum CPU frequency - governor\n"
+msgstr ""
+" minimální frekvence CPU - maximální frekvence CPU - regulátor\n"
+
+#: utils/cpufreq-info.c:151
+#, c-format
+msgid "Error while evaluating Boost Capabilities on CPU %d -- are you root?\n"
+msgstr ""
+
+#. P state changes via MSR are identified via cpuid 80000007
+#. on Intel and AMD, but we assume boost capable machines can do that
+#. if (cpuid_eax(0x80000000) >= 0x80000007
+#. && (cpuid_edx(0x80000007) & (1 << 7)))
+#.
+#: utils/cpufreq-info.c:161
+#, c-format
+msgid " boost state support: \n"
+msgstr ""
+
+#: utils/cpufreq-info.c:163
+#, c-format
+msgid " Supported: %s\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:163 utils/cpufreq-info.c:164
+msgid "yes"
+msgstr ""
+
+#: utils/cpufreq-info.c:163 utils/cpufreq-info.c:164
+msgid "no"
+msgstr ""
+
+#: utils/cpufreq-info.c:164
+#, fuzzy, c-format
+msgid " Active: %s\n"
+msgstr " ovladaÄ: %s\n"
+
+#: utils/cpufreq-info.c:177
+#, c-format
+msgid " Boost States: %d\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:178
+#, c-format
+msgid " Total States: %d\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:181
+#, c-format
+msgid " Pstate-Pb%d: %luMHz (boost state)\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:184
+#, c-format
+msgid " Pstate-P%d: %luMHz\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:211
+#, c-format
+msgid " no or unknown cpufreq driver is active on this CPU\n"
+msgstr " pro tento CPU není aktivní žádný známý ovladaÄ cpufreq\n"
+
+#: utils/cpufreq-info.c:213
+#, c-format
+msgid " driver: %s\n"
+msgstr " ovladaÄ: %s\n"
+
+#: utils/cpufreq-info.c:219
+#, fuzzy, c-format
+msgid " CPUs which run at the same hardware frequency: "
+msgstr " CPU, které musí měnit frekvenci zároveň: "
+
+#: utils/cpufreq-info.c:230
+#, fuzzy, c-format
+msgid " CPUs which need to have their frequency coordinated by software: "
+msgstr " CPU, které musí měnit frekvenci zároveň: "
+
+#: utils/cpufreq-info.c:241
+#, c-format
+msgid " maximum transition latency: "
+msgstr ""
+
+#: utils/cpufreq-info.c:247
+#, c-format
+msgid " hardware limits: "
+msgstr " hardwarové meze: "
+
+#: utils/cpufreq-info.c:256
+#, c-format
+msgid " available frequency steps: "
+msgstr " dostupné frekvence: "
+
+#: utils/cpufreq-info.c:269
+#, c-format
+msgid " available cpufreq governors: "
+msgstr " dostupné regulátory: "
+
+#: utils/cpufreq-info.c:280
+#, c-format
+msgid " current policy: frequency should be within "
+msgstr " souÄasná taktika: frekvence by mÄ›la být mezi "
+
+#: utils/cpufreq-info.c:282
+#, c-format
+msgid " and "
+msgstr " a "
+
+#: utils/cpufreq-info.c:286
+#, c-format
+msgid ""
+"The governor \"%s\" may decide which speed to use\n"
+" within this range.\n"
+msgstr ""
+" Regulátor \"%s\" může rozhodnout jakou frekvenci použít\n"
+" v těchto mezích.\n"
+
+#: utils/cpufreq-info.c:293
+#, c-format
+msgid " current CPU frequency is "
+msgstr " souÄasná frekvence CPU je "
+
+#: utils/cpufreq-info.c:296
+#, c-format
+msgid " (asserted by call to hardware)"
+msgstr " (zjištěno hardwarovým voláním)"
+
+#: utils/cpufreq-info.c:304
+#, c-format
+msgid " cpufreq stats: "
+msgstr " statistika cpufreq: "
+
+#: utils/cpufreq-info.c:472
+#, fuzzy, c-format
+msgid "Usage: cpupower freqinfo [options]\n"
+msgstr "Užití: cpufreq-info [pÅ™epínaÄe]\n"
+
+#: utils/cpufreq-info.c:473 utils/cpufreq-set.c:26 utils/cpupower-set.c:23
+#: utils/cpupower-info.c:22 utils/cpuidle-info.c:148
+#, c-format
+msgid "Options:\n"
+msgstr "PÅ™epínaÄe:\n"
+
+#: utils/cpufreq-info.c:474
+#, fuzzy, c-format
+msgid " -e, --debug Prints out debug information [default]\n"
+msgstr " -e, --debug Vypíše ladicí informace\n"
+
+#: utils/cpufreq-info.c:475
+#, c-format
+msgid ""
+" -f, --freq Get frequency the CPU currently runs at, according\n"
+" to the cpufreq core *\n"
+msgstr ""
+" -f, --freq Zjistí aktuální frekvenci, na které CPU běží\n"
+" podle cpufreq *\n"
+
+#: utils/cpufreq-info.c:477
+#, c-format
+msgid ""
+" -w, --hwfreq Get frequency the CPU currently runs at, by reading\n"
+" it from hardware (only available to root) *\n"
+msgstr ""
+" -w, --hwfreq Zjistí aktuální frekvenci, na které CPU běží\n"
+" z hardware (dostupné jen uživateli root) *\n"
+
+#: utils/cpufreq-info.c:479
+#, c-format
+msgid ""
+" -l, --hwlimits Determine the minimum and maximum CPU frequency "
+"allowed *\n"
+msgstr ""
+" -l, --hwlimits Zjistí minimální a maximální dostupnou frekvenci CPU "
+"*\n"
+
+#: utils/cpufreq-info.c:480
+#, c-format
+msgid " -d, --driver Determines the used cpufreq kernel driver *\n"
+msgstr " -d, --driver Zjistí aktivní ovladaÄ cpufreq *\n"
+
+#: utils/cpufreq-info.c:481
+#, c-format
+msgid " -p, --policy Gets the currently used cpufreq policy *\n"
+msgstr " -p, --policy Zjistí aktuální taktiku cpufreq *\n"
+
+#: utils/cpufreq-info.c:482
+#, c-format
+msgid " -g, --governors Determines available cpufreq governors *\n"
+msgstr " -g, --governors Zjistí dostupné regulátory cpufreq *\n"
+
+#: utils/cpufreq-info.c:483
+#, fuzzy, c-format
+msgid ""
+" -r, --related-cpus Determines which CPUs run at the same hardware "
+"frequency *\n"
+msgstr ""
+" -a, --affected-cpus Zjistí, které CPU musí měnit frekvenci zároveň *\n"
+
+#: utils/cpufreq-info.c:484
+#, fuzzy, c-format
+msgid ""
+" -a, --affected-cpus Determines which CPUs need to have their frequency\n"
+" coordinated by software *\n"
+msgstr ""
+" -a, --affected-cpus Zjistí, které CPU musí měnit frekvenci zároveň *\n"
+
+#: utils/cpufreq-info.c:486
+#, c-format
+msgid " -s, --stats Shows cpufreq statistics if available\n"
+msgstr " -s, --stats Zobrazí statistiku cpufreq, je-li dostupná\n"
+
+#: utils/cpufreq-info.c:487
+#, fuzzy, c-format
+msgid ""
+" -y, --latency Determines the maximum latency on CPU frequency "
+"changes *\n"
+msgstr ""
+" -l, --hwlimits Zjistí minimální a maximální dostupnou frekvenci CPU "
+"*\n"
+
+#: utils/cpufreq-info.c:488
+#, c-format
+msgid " -b, --boost Checks for turbo or boost modes *\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:489
+#, c-format
+msgid ""
+" -o, --proc Prints out information like provided by the /proc/"
+"cpufreq\n"
+" interface in 2.4. and early 2.6. kernels\n"
+msgstr ""
+" -o, --proc Vypíše informace ve formátu, jaký používalo rozhraní\n"
+" /proc/cpufreq v kernelech Å™ady 2.4 a Äasné 2.6\n"
+
+#: utils/cpufreq-info.c:491
+#, fuzzy, c-format
+msgid ""
+" -m, --human human-readable output for the -f, -w, -s and -y "
+"parameters\n"
+msgstr ""
+" -m, --human Výstup parametrů -f, -w a -s v „lidmi Äitelném“ "
+"formátu\n"
+
+#: utils/cpufreq-info.c:492 utils/cpuidle-info.c:152
+#, c-format
+msgid " -h, --help Prints out this screen\n"
+msgstr " -h, --help Vypíše tuto nápovědu\n"
+
+#: utils/cpufreq-info.c:495
+#, c-format
+msgid ""
+"If no argument or only the -c, --cpu parameter is given, debug output about\n"
+"cpufreq is printed which is useful e.g. for reporting bugs.\n"
+msgstr ""
+"Není-li zadán žádný parametr nebo je-li zadán pouze pÅ™epínaÄ -c, --cpu, "
+"jsou\n"
+"vypsány ladicí informace, což může být užiteÄné například pÅ™i hlášení chyb.\n"
+
+#: utils/cpufreq-info.c:497
+#, c-format
+msgid ""
+"For the arguments marked with *, omitting the -c or --cpu argument is\n"
+"equivalent to setting it to zero\n"
+msgstr ""
+"Není-li pÅ™i použití pÅ™epínaÄů oznaÄených * zadán parametr -c nebo --cpu,\n"
+"předpokládá se jeho hodnota 0.\n"
+
+#: utils/cpufreq-info.c:580
+#, c-format
+msgid ""
+"The argument passed to this tool can't be combined with passing a --cpu "
+"argument\n"
+msgstr "Zadaný parametr nemůže být použit zároveň s pÅ™epínaÄem -c nebo --cpu\n"
+
+#: utils/cpufreq-info.c:596
+#, c-format
+msgid ""
+"You can't specify more than one --cpu parameter and/or\n"
+"more than one output-specific argument\n"
+msgstr ""
+"Nelze zadat více než jeden parametr -c nebo --cpu\n"
+"anebo více než jeden parametr urÄující výstup\n"
+
+#: utils/cpufreq-info.c:600 utils/cpufreq-set.c:82 utils/cpupower-set.c:42
+#: utils/cpupower-info.c:42 utils/cpuidle-info.c:213
+#, c-format
+msgid "invalid or unknown argument\n"
+msgstr "neplatný nebo neznámý parametr\n"
+
+#: utils/cpufreq-info.c:617
+#, c-format
+msgid "couldn't analyze CPU %d as it doesn't seem to be present\n"
+msgstr "nelze analyzovat CPU %d, vypadá to, že není přítomen\n"
+
+#: utils/cpufreq-info.c:620 utils/cpupower-info.c:142
+#, c-format
+msgid "analyzing CPU %d:\n"
+msgstr "analyzuji CPU %d:\n"
+
+#: utils/cpufreq-set.c:25
+#, fuzzy, c-format
+msgid "Usage: cpupower frequency-set [options]\n"
+msgstr "Užití: cpufreq-set [pÅ™epínaÄe]\n"
+
+#: utils/cpufreq-set.c:27
+#, c-format
+msgid ""
+" -d FREQ, --min FREQ new minimum CPU frequency the governor may "
+"select\n"
+msgstr ""
+" -d FREQ, --min FREQ Nová nejnižší frekvence, kterou může regulátor "
+"vybrat\n"
+
+#: utils/cpufreq-set.c:28
+#, c-format
+msgid ""
+" -u FREQ, --max FREQ new maximum CPU frequency the governor may "
+"select\n"
+msgstr ""
+" -u FREQ, --max FREQ Nová nejvyšší frekvence, kterou může regulátor "
+"zvolit\n"
+
+#: utils/cpufreq-set.c:29
+#, c-format
+msgid " -g GOV, --governor GOV new cpufreq governor\n"
+msgstr " -g GOV, --governors GOV Nový regulátor cpufreq\n"
+
+#: utils/cpufreq-set.c:30
+#, c-format
+msgid ""
+" -f FREQ, --freq FREQ specific frequency to be set. Requires userspace\n"
+" governor to be available and loaded\n"
+msgstr ""
+" -f FREQ, --freq FREQ Frekvence, která má být nastavena. Vyžaduje, aby "
+"byl\n"
+" v jádře nahrán regulátor ‚userspace‘.\n"
+
+#: utils/cpufreq-set.c:32
+#, c-format
+msgid " -r, --related Switches all hardware-related CPUs\n"
+msgstr ""
+
+#: utils/cpufreq-set.c:33 utils/cpupower-set.c:28 utils/cpupower-info.c:27
+#, fuzzy, c-format
+msgid " -h, --help Prints out this screen\n"
+msgstr " -h, --help Vypíše tuto nápovědu\n"
+
+#: utils/cpufreq-set.c:35
+#, fuzzy, c-format
+msgid ""
+"Notes:\n"
+"1. Omitting the -c or --cpu argument is equivalent to setting it to \"all\"\n"
+msgstr ""
+"Není-li pÅ™i použití pÅ™epínaÄů oznaÄených * zadán parametr -c nebo --cpu,\n"
+"předpokládá se jeho hodnota 0.\n"
+
+#: utils/cpufreq-set.c:37
+#, fuzzy, c-format
+msgid ""
+"2. The -f FREQ, --freq FREQ parameter cannot be combined with any other "
+"parameter\n"
+" except the -c CPU, --cpu CPU parameter\n"
+"3. FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz\n"
+" by postfixing the value with the wanted unit name, without any space\n"
+" (FREQuency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n"
+msgstr ""
+"Poznámky:\n"
+"1. Vynechání parametru -c nebo --cpu je ekvivalentní jeho nastavení na 0\n"
+"2. PÅ™epínaÄ -f nebo --freq nemůže být použit zároveň s žádným jiným vyjma -"
+"c\n"
+" nebo --cpu\n"
+"3. Frekvence (FREQ) mohou být zadány v Hz, kHz (výchozí), MHz, GHz nebo THz\n"
+" pÅ™ipojením názvu jednotky bez mezery mezi Äíslem a jednotkou\n"
+" (FREQ v kHz =^ Hz * 0,001 = ^ MHz * 1000 =^ GHz * 1000000)\n"
+
+#: utils/cpufreq-set.c:57
+#, c-format
+msgid ""
+"Error setting new values. Common errors:\n"
+"- Do you have proper administration rights? (super-user?)\n"
+"- Is the governor you requested available and modprobed?\n"
+"- Trying to set an invalid policy?\n"
+"- Trying to set a specific frequency, but userspace governor is not "
+"available,\n"
+" for example because of hardware which cannot be set to a specific "
+"frequency\n"
+" or because the userspace governor isn't loaded?\n"
+msgstr ""
+"Chyba při nastavování nových hodnot. Obvyklé problémy:\n"
+"- Máte patÅ™iÄná administrátorská práva? (root?)\n"
+"- Je požadovaný regulátor dostupný v jádře? (modprobe?)\n"
+"- Snažíte se nastavit neplatnou taktiku?\n"
+"- Snažíte se nastavit urÄitou frekvenci, ale není dostupný\n"
+" regulátor ‚userspace‘, například protože není nahrán v jádře,\n"
+" nebo nelze na tomto hardware nastavit urÄitou frekvenci?\n"
+
+#: utils/cpufreq-set.c:170
+#, c-format
+msgid "wrong, unknown or unhandled CPU?\n"
+msgstr "neznámý nebo nepodporovaný CPU?\n"
+
+#: utils/cpufreq-set.c:302
+#, c-format
+msgid ""
+"the -f/--freq parameter cannot be combined with -d/--min, -u/--max or\n"
+"-g/--governor parameters\n"
+msgstr ""
+"pÅ™epínaÄ -f/--freq nemůže být použit zároveň\n"
+"s pÅ™epínaÄem -d/--min, -u/--max nebo -g/--governor\n"
+
+#: utils/cpufreq-set.c:308
+#, c-format
+msgid ""
+"At least one parameter out of -f/--freq, -d/--min, -u/--max, and\n"
+"-g/--governor must be passed\n"
+msgstr ""
+"Musí být zadán alespoň jeden pÅ™epínaÄ\n"
+"-f/--freq, -d/--min, -u/--max nebo -g/--governor\n"
+
+#: utils/cpufreq-set.c:347
+#, c-format
+msgid "Setting cpu: %d\n"
+msgstr ""
+
+#: utils/cpupower-set.c:22
+#, c-format
+msgid "Usage: cpupower set [ -b val ] [ -m val ] [ -s val ]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:24
+#, c-format
+msgid ""
+" -b, --perf-bias [VAL] Sets CPU's power vs performance policy on some\n"
+" Intel models [0-15], see manpage for details\n"
+msgstr ""
+
+#: utils/cpupower-set.c:26
+#, c-format
+msgid ""
+" -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
+msgstr ""
+
+#: utils/cpupower-set.c:27
+#, c-format
+msgid ""
+" -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
+"policy.\n"
+msgstr ""
+
+#: utils/cpupower-set.c:80
+#, c-format
+msgid "--perf-bias param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:91
+#, c-format
+msgid "--sched-mc param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:102
+#, c-format
+msgid "--sched-smt param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:121
+#, c-format
+msgid "Error setting sched-mc %s\n"
+msgstr ""
+
+#: utils/cpupower-set.c:127
+#, c-format
+msgid "Error setting sched-smt %s\n"
+msgstr ""
+
+#: utils/cpupower-set.c:146
+#, c-format
+msgid "Error setting perf-bias value on CPU %d\n"
+msgstr ""
+
+#: utils/cpupower-info.c:21
+#, c-format
+msgid "Usage: cpupower info [ -b ] [ -m ] [ -s ]\n"
+msgstr ""
+
+#: utils/cpupower-info.c:23
+#, c-format
+msgid ""
+" -b, --perf-bias Gets CPU's power vs performance policy on some\n"
+" Intel models [0-15], see manpage for details\n"
+msgstr ""
+
+#: utils/cpupower-info.c:25
+#, fuzzy, c-format
+msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
+msgstr " -p, --policy Zjistí aktuální taktiku cpufreq *\n"
+
+#: utils/cpupower-info.c:26
+#, c-format
+msgid ""
+" -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
+msgstr ""
+
+#: utils/cpupower-info.c:28
+#, c-format
+msgid ""
+"\n"
+"Passing no option will show all info, by default only on core 0\n"
+msgstr ""
+
+#: utils/cpupower-info.c:102
+#, c-format
+msgid "System's multi core scheduler setting: "
+msgstr ""
+
+#. if sysfs file is missing it's: errno == ENOENT
+#: utils/cpupower-info.c:105 utils/cpupower-info.c:114
+#, c-format
+msgid "not supported\n"
+msgstr ""
+
+#: utils/cpupower-info.c:111
+#, c-format
+msgid "System's thread sibling scheduler setting: "
+msgstr ""
+
+#: utils/cpupower-info.c:126
+#, c-format
+msgid "Intel's performance bias setting needs root privileges\n"
+msgstr ""
+
+#: utils/cpupower-info.c:128
+#, c-format
+msgid "System does not support Intel's performance bias setting\n"
+msgstr ""
+
+#: utils/cpupower-info.c:147
+#, c-format
+msgid "Could not read perf-bias value\n"
+msgstr ""
+
+#: utils/cpupower-info.c:150
+#, c-format
+msgid "perf-bias: %d\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:28
+#, fuzzy, c-format
+msgid "Analyzing CPU %d:\n"
+msgstr "analyzuji CPU %d:\n"
+
+#: utils/cpuidle-info.c:32
+#, c-format
+msgid "CPU %u: No idle states\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:36
+#, c-format
+msgid "CPU %u: Can't read idle state info\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:41
+#, c-format
+msgid "Could not determine max idle state %u\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:46
+#, c-format
+msgid "Number of idle states: %d\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:48
+#, fuzzy, c-format
+msgid "Available idle states:"
+msgstr " dostupné frekvence: "
+
+#: utils/cpuidle-info.c:71
+#, c-format
+msgid "Flags/Description: %s\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:74
+#, c-format
+msgid "Latency: %lu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:76
+#, c-format
+msgid "Usage: %lu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:78
+#, c-format
+msgid "Duration: %llu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:90
+#, c-format
+msgid "Could not determine cpuidle driver\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:94
+#, fuzzy, c-format
+msgid "CPUidle driver: %s\n"
+msgstr " ovladaÄ: %s\n"
+
+#: utils/cpuidle-info.c:99
+#, c-format
+msgid "Could not determine cpuidle governor\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:103
+#, c-format
+msgid "CPUidle governor: %s\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:122
+#, c-format
+msgid "CPU %u: Can't read C-state info\n"
+msgstr ""
+
+#. printf("Cstates: %d\n", cstates);
+#: utils/cpuidle-info.c:127
+#, c-format
+msgid "active state: C0\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:128
+#, c-format
+msgid "max_cstate: C%u\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:129
+#, c-format
+msgid "maximum allowed latency: %lu usec\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:130
+#, c-format
+msgid "states:\t\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:132
+#, c-format
+msgid " C%d: type[C%d] "
+msgstr ""
+
+#: utils/cpuidle-info.c:134
+#, c-format
+msgid "promotion[--] demotion[--] "
+msgstr ""
+
+#: utils/cpuidle-info.c:135
+#, c-format
+msgid "latency[%03lu] "
+msgstr ""
+
+#: utils/cpuidle-info.c:137
+#, c-format
+msgid "usage[%08lu] "
+msgstr ""
+
+#: utils/cpuidle-info.c:139
+#, c-format
+msgid "duration[%020Lu] \n"
+msgstr ""
+
+#: utils/cpuidle-info.c:147
+#, fuzzy, c-format
+msgid "Usage: cpupower idleinfo [options]\n"
+msgstr "Užití: cpufreq-info [pÅ™epínaÄe]\n"
+
+#: utils/cpuidle-info.c:149
+#, fuzzy, c-format
+msgid " -s, --silent Only show general C-state information\n"
+msgstr " -e, --debug Vypíše ladicí informace\n"
+
+#: utils/cpuidle-info.c:150
+#, fuzzy, c-format
+msgid ""
+" -o, --proc Prints out information like provided by the /proc/"
+"acpi/processor/*/power\n"
+" interface in older kernels\n"
+msgstr ""
+" -o, --proc Vypíše informace ve formátu, jaký používalo rozhraní\n"
+" /proc/cpufreq v kernelech Å™ady 2.4 a Äasné 2.6\n"
+
+#: utils/cpuidle-info.c:209
+#, fuzzy, c-format
+msgid "You can't specify more than one output-specific argument\n"
+msgstr ""
+"Nelze zadat více než jeden parametr -c nebo --cpu\n"
+"anebo více než jeden parametr urÄující výstup\n"
+
+#~ msgid ""
+#~ " -c CPU, --cpu CPU CPU number which information shall be determined "
+#~ "about\n"
+#~ msgstr ""
+#~ " -c CPU, --cpu CPU Číslo CPU, o kterém se mají zjistit informace\n"
+
+#~ msgid ""
+#~ " -c CPU, --cpu CPU number of CPU where cpufreq settings shall be "
+#~ "modified\n"
+#~ msgstr ""
+#~ " -c CPU, --cpu CPU Číslo CPU pro který se má provést nastavení "
+#~ "cpufreq\n"
diff --git a/tools/power/cpupower/po/de.po b/tools/power/cpupower/po/de.po
new file mode 100644
index 00000000000..78c09e51663
--- /dev/null
+++ b/tools/power/cpupower/po/de.po
@@ -0,0 +1,961 @@
+# German translations for cpufrequtils package
+# German messages for cpufrequtils.
+# Copyright (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.net>
+# This file is distributed under the same license as the cpufrequtils package.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cpufrequtils 006\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2011-03-08 17:03+0100\n"
+"PO-Revision-Date: 2009-08-08 17:18+0100\n"
+"Last-Translator: <linux@dominikbrodowski.net>\n"
+"Language-Team: NONE\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=ISO-8859-1\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: utils/idle_monitor/nhm_idle.c:36
+msgid "Processor Core C3"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:43
+msgid "Processor Core C6"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:51
+msgid "Processor Package C3"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:58 utils/idle_monitor/amd_fam14h_idle.c:70
+msgid "Processor Package C6"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:33
+msgid "Processor Core C7"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:40
+msgid "Processor Package C2"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:47
+msgid "Processor Package C7"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:56
+msgid "Package in sleep state (PC1 or deeper)"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:63
+msgid "Processor Package C1"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:77
+msgid "North Bridge P1 boolean counter (returns 0 or 1)"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:35
+msgid "Processor Core not idle"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:42
+msgid "Processor Core in an idle state"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:50
+msgid "Average Frequency (including boost) in MHz"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:66
+#, c-format
+msgid ""
+"cpupower monitor: [-h] [ [-t] | [-l] | [-m <mon1>,[<mon2>] ] ] [-i "
+"interval_sec | -c command ...]\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:69
+#, c-format
+msgid ""
+"cpupower monitor: [-v] [-h] [ [-t] | [-l] | [-m <mon1>,[<mon2>] ] ] [-i "
+"interval_sec | -c command ...]\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:71
+#, c-format
+msgid "\t -v: be more verbose\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:73
+#, c-format
+msgid "\t -h: print this help\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:74
+#, c-format
+msgid "\t -i: time intervall to measure for in seconds (default 1)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:75
+#, c-format
+msgid "\t -t: show CPU topology/hierarchy\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:76
+#, c-format
+msgid "\t -l: list available CPU sleep monitors (for use with -m)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:77
+#, c-format
+msgid "\t -m: show specific CPU sleep monitors only (in same order)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:79
+#, c-format
+msgid ""
+"only one of: -t, -l, -m are allowed\n"
+"If none of them is passed,"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:80
+#, c-format
+msgid " all supported monitors are shown\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:197
+#, c-format
+msgid "Monitor %s, Counter %s has no count function. Implementation error\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:207
+#, c-format
+msgid " *is offline\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:236
+#, c-format
+msgid "%s: max monitor name length (%d) exceeded\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:250
+#, c-format
+msgid "No matching monitor found in %s, try -l option\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:266
+#, c-format
+msgid "Monitor \"%s\" (%d states) - Might overflow after %u s\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:319
+#, c-format
+msgid "%s took %.5f seconds and exited with status %d\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:406
+#, c-format
+msgid "Cannot read number of available processors\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:417
+#, c-format
+msgid "Available monitor %s needs root access\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:428
+#, c-format
+msgid "No HW Cstate monitors found\n"
+msgstr ""
+
+#: utils/cpupower.c:78
+#, c-format
+msgid "cpupower [ -c cpulist ] subcommand [ARGS]\n"
+msgstr ""
+
+#: utils/cpupower.c:79
+#, c-format
+msgid "cpupower --version\n"
+msgstr ""
+
+#: utils/cpupower.c:80
+#, c-format
+msgid "Supported subcommands are:\n"
+msgstr ""
+
+#: utils/cpupower.c:83
+#, c-format
+msgid ""
+"\n"
+"Some subcommands can make use of the -c cpulist option.\n"
+msgstr ""
+
+#: utils/cpupower.c:84
+#, c-format
+msgid "Look at the general cpupower manpage how to use it\n"
+msgstr ""
+
+#: utils/cpupower.c:85
+#, c-format
+msgid "and read up the subcommand's manpage whether it is supported.\n"
+msgstr ""
+
+#: utils/cpupower.c:86
+#, c-format
+msgid ""
+"\n"
+"Use cpupower help subcommand for getting help for above subcommands.\n"
+msgstr ""
+
+#: utils/cpupower.c:91
+#, c-format
+msgid "Report errors and bugs to %s, please.\n"
+msgstr "Bitte melden Sie Fehler an %s.\n"
+
+#: utils/cpupower.c:114
+#, c-format
+msgid "Error parsing cpu list\n"
+msgstr ""
+
+#: utils/cpupower.c:172
+#, c-format
+msgid "Subcommand %s needs root privileges\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:31
+#, c-format
+msgid "Couldn't count the number of CPUs (%s: %s), assuming 1\n"
+msgstr ""
+"Konnte nicht die Anzahl der CPUs herausfinden (%s : %s), nehme daher 1 an.\n"
+
+#: utils/cpufreq-info.c:63
+#, c-format
+msgid ""
+" minimum CPU frequency - maximum CPU frequency - governor\n"
+msgstr ""
+" minimale CPU-Taktfreq. - maximale CPU-Taktfreq. - Regler \n"
+
+#: utils/cpufreq-info.c:151
+#, c-format
+msgid "Error while evaluating Boost Capabilities on CPU %d -- are you root?\n"
+msgstr ""
+
+#. P state changes via MSR are identified via cpuid 80000007
+#. on Intel and AMD, but we assume boost capable machines can do that
+#. if (cpuid_eax(0x80000000) >= 0x80000007
+#. && (cpuid_edx(0x80000007) & (1 << 7)))
+#.
+#: utils/cpufreq-info.c:161
+#, c-format
+msgid " boost state support: \n"
+msgstr ""
+
+#: utils/cpufreq-info.c:163
+#, c-format
+msgid " Supported: %s\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:163 utils/cpufreq-info.c:164
+msgid "yes"
+msgstr ""
+
+#: utils/cpufreq-info.c:163 utils/cpufreq-info.c:164
+msgid "no"
+msgstr ""
+
+#: utils/cpufreq-info.c:164
+#, fuzzy, c-format
+msgid " Active: %s\n"
+msgstr " Treiber: %s\n"
+
+#: utils/cpufreq-info.c:177
+#, c-format
+msgid " Boost States: %d\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:178
+#, c-format
+msgid " Total States: %d\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:181
+#, c-format
+msgid " Pstate-Pb%d: %luMHz (boost state)\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:184
+#, c-format
+msgid " Pstate-P%d: %luMHz\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:211
+#, c-format
+msgid " no or unknown cpufreq driver is active on this CPU\n"
+msgstr " kein oder nicht bestimmbarer cpufreq-Treiber aktiv\n"
+
+#: utils/cpufreq-info.c:213
+#, c-format
+msgid " driver: %s\n"
+msgstr " Treiber: %s\n"
+
+#: utils/cpufreq-info.c:219
+#, c-format
+msgid " CPUs which run at the same hardware frequency: "
+msgstr " Folgende CPUs laufen mit der gleichen Hardware-Taktfrequenz: "
+
+#: utils/cpufreq-info.c:230
+#, c-format
+msgid " CPUs which need to have their frequency coordinated by software: "
+msgstr " Die Taktfrequenz folgender CPUs werden per Software koordiniert: "
+
+#: utils/cpufreq-info.c:241
+#, c-format
+msgid " maximum transition latency: "
+msgstr " Maximale Dauer eines Taktfrequenzwechsels: "
+
+#: utils/cpufreq-info.c:247
+#, c-format
+msgid " hardware limits: "
+msgstr " Hardwarebedingte Grenzen der Taktfrequenz: "
+
+#: utils/cpufreq-info.c:256
+#, c-format
+msgid " available frequency steps: "
+msgstr " mögliche Taktfrequenzen: "
+
+#: utils/cpufreq-info.c:269
+#, c-format
+msgid " available cpufreq governors: "
+msgstr " mögliche Regler: "
+
+#: utils/cpufreq-info.c:280
+#, c-format
+msgid " current policy: frequency should be within "
+msgstr " momentane Taktik: die Frequenz soll innerhalb "
+
+#: utils/cpufreq-info.c:282
+#, c-format
+msgid " and "
+msgstr " und "
+
+#: utils/cpufreq-info.c:286
+#, c-format
+msgid ""
+"The governor \"%s\" may decide which speed to use\n"
+" within this range.\n"
+msgstr ""
+" liegen. Der Regler \"%s\" kann frei entscheiden,\n"
+" welche Taktfrequenz innerhalb dieser Grenze verwendet "
+"wird.\n"
+
+#: utils/cpufreq-info.c:293
+#, c-format
+msgid " current CPU frequency is "
+msgstr " momentane Taktfrequenz ist "
+
+#: utils/cpufreq-info.c:296
+#, c-format
+msgid " (asserted by call to hardware)"
+msgstr " (verifiziert durch Nachfrage bei der Hardware)"
+
+#: utils/cpufreq-info.c:304
+#, c-format
+msgid " cpufreq stats: "
+msgstr " Statistik:"
+
+#: utils/cpufreq-info.c:472
+#, fuzzy, c-format
+msgid "Usage: cpupower freqinfo [options]\n"
+msgstr "Aufruf: cpufreq-info [Optionen]\n"
+
+#: utils/cpufreq-info.c:473 utils/cpufreq-set.c:26 utils/cpupower-set.c:23
+#: utils/cpupower-info.c:22 utils/cpuidle-info.c:148
+#, c-format
+msgid "Options:\n"
+msgstr "Optionen:\n"
+
+#: utils/cpufreq-info.c:474
+#, fuzzy, c-format
+msgid " -e, --debug Prints out debug information [default]\n"
+msgstr ""
+" -e, --debug Erzeugt detaillierte Informationen, hilfreich\n"
+" zum Aufspüren von Fehlern\n"
+
+#: utils/cpufreq-info.c:475
+#, c-format
+msgid ""
+" -f, --freq Get frequency the CPU currently runs at, according\n"
+" to the cpufreq core *\n"
+msgstr ""
+" -f, --freq Findet die momentane CPU-Taktfrquenz heraus (nach\n"
+" Meinung des Betriebssystems) *\n"
+
+#: utils/cpufreq-info.c:477
+#, c-format
+msgid ""
+" -w, --hwfreq Get frequency the CPU currently runs at, by reading\n"
+" it from hardware (only available to root) *\n"
+msgstr ""
+" -w, --hwfreq Findet die momentane CPU-Taktfrequenz heraus\n"
+" (verifiziert durch Nachfrage bei der Hardware)\n"
+" [nur der Administrator kann dies tun] *\n"
+
+#: utils/cpufreq-info.c:479
+#, c-format
+msgid ""
+" -l, --hwlimits Determine the minimum and maximum CPU frequency "
+"allowed *\n"
+msgstr ""
+" -l, --hwlimits Findet die minimale und maximale Taktfrequenz heraus "
+"*\n"
+
+#: utils/cpufreq-info.c:480
+#, c-format
+msgid " -d, --driver Determines the used cpufreq kernel driver *\n"
+msgstr " -d, --driver Findet den momentanen Treiber heraus *\n"
+
+#: utils/cpufreq-info.c:481
+#, c-format
+msgid " -p, --policy Gets the currently used cpufreq policy *\n"
+msgstr " -p, --policy Findet die momentane Taktik heraus *\n"
+
+#: utils/cpufreq-info.c:482
+#, c-format
+msgid " -g, --governors Determines available cpufreq governors *\n"
+msgstr " -g, --governors Erzeugt eine Liste mit verfügbaren Reglern *\n"
+
+#: utils/cpufreq-info.c:483
+#, c-format
+msgid ""
+" -r, --related-cpus Determines which CPUs run at the same hardware "
+"frequency *\n"
+msgstr ""
+" -r, --related-cpus Findet heraus, welche CPUs mit derselben "
+"physikalischen\n"
+" Taktfrequenz laufen *\n"
+
+#: utils/cpufreq-info.c:484
+#, c-format
+msgid ""
+" -a, --affected-cpus Determines which CPUs need to have their frequency\n"
+" coordinated by software *\n"
+msgstr ""
+" -a, --affected-cpus Findet heraus, von welchen CPUs die Taktfrequenz "
+"durch\n"
+" Software koordiniert werden muss *\n"
+
+#: utils/cpufreq-info.c:486
+#, c-format
+msgid " -s, --stats Shows cpufreq statistics if available\n"
+msgstr ""
+" -s, --stats Zeigt, sofern möglich, Statistiken über cpufreq an.\n"
+
+#: utils/cpufreq-info.c:487
+#, c-format
+msgid ""
+" -y, --latency Determines the maximum latency on CPU frequency "
+"changes *\n"
+msgstr ""
+" -y, --latency Findet die maximale Dauer eines Taktfrequenzwechsels "
+"heraus *\n"
+
+#: utils/cpufreq-info.c:488
+#, c-format
+msgid " -b, --boost Checks for turbo or boost modes *\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:489
+#, c-format
+msgid ""
+" -o, --proc Prints out information like provided by the /proc/"
+"cpufreq\n"
+" interface in 2.4. and early 2.6. kernels\n"
+msgstr ""
+" -o, --proc Erzeugt Informationen in einem ähnlichem Format zu "
+"dem\n"
+" der /proc/cpufreq-Datei in 2.4. und frühen 2.6.\n"
+" Kernel-Versionen\n"
+
+#: utils/cpufreq-info.c:491
+#, c-format
+msgid ""
+" -m, --human human-readable output for the -f, -w, -s and -y "
+"parameters\n"
+msgstr ""
+" -m, --human Formatiert Taktfrequenz- und Zeitdauerangaben in "
+"besser\n"
+" lesbarer Form (MHz, GHz; us, ms)\n"
+
+#: utils/cpufreq-info.c:492 utils/cpuidle-info.c:152
+#, c-format
+msgid " -h, --help Prints out this screen\n"
+msgstr " -h, --help Gibt diese Kurzübersicht aus\n"
+
+#: utils/cpufreq-info.c:495
+#, c-format
+msgid ""
+"If no argument or only the -c, --cpu parameter is given, debug output about\n"
+"cpufreq is printed which is useful e.g. for reporting bugs.\n"
+msgstr ""
+"Sofern kein anderer Parameter als '-c, --cpu' angegeben wird, liefert "
+"dieses\n"
+"Programm Informationen, die z.B. zum Berichten von Fehlern nützlich sind.\n"
+
+#: utils/cpufreq-info.c:497
+#, c-format
+msgid ""
+"For the arguments marked with *, omitting the -c or --cpu argument is\n"
+"equivalent to setting it to zero\n"
+msgstr ""
+"Bei den mit * markierten Parametern wird '--cpu 0' angenommen, soweit nicht\n"
+"mittels -c oder --cpu etwas anderes angegeben wird\n"
+
+#: utils/cpufreq-info.c:580
+#, c-format
+msgid ""
+"The argument passed to this tool can't be combined with passing a --cpu "
+"argument\n"
+msgstr "Diese Option kann nicht mit der --cpu-Option kombiniert werden\n"
+
+#: utils/cpufreq-info.c:596
+#, c-format
+msgid ""
+"You can't specify more than one --cpu parameter and/or\n"
+"more than one output-specific argument\n"
+msgstr ""
+"Man kann nicht mehr als einen --cpu-Parameter und/oder mehr als einen\n"
+"informationsspezifischen Parameter gleichzeitig angeben\n"
+
+#: utils/cpufreq-info.c:600 utils/cpufreq-set.c:82 utils/cpupower-set.c:42
+#: utils/cpupower-info.c:42 utils/cpuidle-info.c:213
+#, c-format
+msgid "invalid or unknown argument\n"
+msgstr "unbekannter oder falscher Parameter\n"
+
+#: utils/cpufreq-info.c:617
+#, c-format
+msgid "couldn't analyze CPU %d as it doesn't seem to be present\n"
+msgstr ""
+"Konnte nicht die CPU %d analysieren, da sie (scheinbar?) nicht existiert.\n"
+
+#: utils/cpufreq-info.c:620 utils/cpupower-info.c:142
+#, c-format
+msgid "analyzing CPU %d:\n"
+msgstr "analysiere CPU %d:\n"
+
+#: utils/cpufreq-set.c:25
+#, fuzzy, c-format
+msgid "Usage: cpupower frequency-set [options]\n"
+msgstr "Aufruf: cpufreq-set [Optionen]\n"
+
+#: utils/cpufreq-set.c:27
+#, c-format
+msgid ""
+" -d FREQ, --min FREQ new minimum CPU frequency the governor may "
+"select\n"
+msgstr ""
+" -d FREQ, --min FREQ neue minimale Taktfrequenz, die der Regler\n"
+" auswählen darf\n"
+
+#: utils/cpufreq-set.c:28
+#, c-format
+msgid ""
+" -u FREQ, --max FREQ new maximum CPU frequency the governor may "
+"select\n"
+msgstr ""
+" -u FREQ, --max FREQ neue maximale Taktfrequenz, die der Regler\n"
+" auswählen darf\n"
+
+#: utils/cpufreq-set.c:29
+#, c-format
+msgid " -g GOV, --governor GOV new cpufreq governor\n"
+msgstr " -g GOV, --governors GOV wechsle zu Regler GOV\n"
+
+#: utils/cpufreq-set.c:30
+#, c-format
+msgid ""
+" -f FREQ, --freq FREQ specific frequency to be set. Requires userspace\n"
+" governor to be available and loaded\n"
+msgstr ""
+" -f FREQ, --freq FREQ setze exakte Taktfrequenz. Benötigt den Regler\n"
+" 'userspace'.\n"
+
+#: utils/cpufreq-set.c:32
+#, c-format
+msgid " -r, --related Switches all hardware-related CPUs\n"
+msgstr ""
+" -r, --related Setze Werte für alle CPUs, deren Taktfrequenz\n"
+" hardwarebedingt identisch ist.\n"
+
+#: utils/cpufreq-set.c:33 utils/cpupower-set.c:28 utils/cpupower-info.c:27
+#, c-format
+msgid " -h, --help Prints out this screen\n"
+msgstr " -h, --help Gibt diese Kurzübersicht aus\n"
+
+#: utils/cpufreq-set.c:35
+#, fuzzy, c-format
+msgid ""
+"Notes:\n"
+"1. Omitting the -c or --cpu argument is equivalent to setting it to \"all\"\n"
+msgstr ""
+"Bei den mit * markierten Parametern wird '--cpu 0' angenommen, soweit nicht\n"
+"mittels -c oder --cpu etwas anderes angegeben wird\n"
+
+#: utils/cpufreq-set.c:37
+#, fuzzy, c-format
+msgid ""
+"2. The -f FREQ, --freq FREQ parameter cannot be combined with any other "
+"parameter\n"
+" except the -c CPU, --cpu CPU parameter\n"
+"3. FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz\n"
+" by postfixing the value with the wanted unit name, without any space\n"
+" (FREQuency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n"
+msgstr ""
+"Hinweise:\n"
+"1. Sofern kein -c oder --cpu-Parameter angegeben ist, wird '--cpu 0'\n"
+" angenommen\n"
+"2. Der Parameter -f bzw. --freq kann mit keinem anderen als dem Parameter\n"
+" -c bzw. --cpu kombiniert werden\n"
+"3. FREQuenzen können in Hz, kHz (Standard), MHz, GHz oder THz eingegeben\n"
+" werden, indem der Wert und unmittelbar anschließend (ohne Leerzeichen!)\n"
+" die Einheit angegeben werden. (Bsp: 1GHz )\n"
+" (FREQuenz in kHz =^ MHz * 1000 =^ GHz * 1000000).\n"
+
+#: utils/cpufreq-set.c:57
+#, c-format
+msgid ""
+"Error setting new values. Common errors:\n"
+"- Do you have proper administration rights? (super-user?)\n"
+"- Is the governor you requested available and modprobed?\n"
+"- Trying to set an invalid policy?\n"
+"- Trying to set a specific frequency, but userspace governor is not "
+"available,\n"
+" for example because of hardware which cannot be set to a specific "
+"frequency\n"
+" or because the userspace governor isn't loaded?\n"
+msgstr ""
+"Beim Einstellen ist ein Fehler aufgetreten. Typische Fehlerquellen sind:\n"
+"- nicht ausreichende Rechte (Administrator)\n"
+"- der Regler ist nicht verfügbar bzw. nicht geladen\n"
+"- die angegebene Taktik ist inkorrekt\n"
+"- eine spezifische Frequenz wurde angegeben, aber der Regler 'userspace'\n"
+" kann entweder hardwarebedingt nicht genutzt werden oder ist nicht geladen\n"
+
+#: utils/cpufreq-set.c:170
+#, c-format
+msgid "wrong, unknown or unhandled CPU?\n"
+msgstr "unbekannte oder nicht regelbare CPU\n"
+
+#: utils/cpufreq-set.c:302
+#, c-format
+msgid ""
+"the -f/--freq parameter cannot be combined with -d/--min, -u/--max or\n"
+"-g/--governor parameters\n"
+msgstr ""
+"Der -f bzw. --freq-Parameter kann nicht mit den Parametern -d/--min, -u/--"
+"max\n"
+"oder -g/--governor kombiniert werden\n"
+
+#: utils/cpufreq-set.c:308
+#, c-format
+msgid ""
+"At least one parameter out of -f/--freq, -d/--min, -u/--max, and\n"
+"-g/--governor must be passed\n"
+msgstr ""
+"Es muss mindestens ein Parameter aus -f/--freq, -d/--min, -u/--max oder\n"
+"-g/--governor angegeben werden.\n"
+
+#: utils/cpufreq-set.c:347
+#, c-format
+msgid "Setting cpu: %d\n"
+msgstr ""
+
+#: utils/cpupower-set.c:22
+#, c-format
+msgid "Usage: cpupower set [ -b val ] [ -m val ] [ -s val ]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:24
+#, c-format
+msgid ""
+" -b, --perf-bias [VAL] Sets CPU's power vs performance policy on some\n"
+" Intel models [0-15], see manpage for details\n"
+msgstr ""
+
+#: utils/cpupower-set.c:26
+#, c-format
+msgid ""
+" -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
+msgstr ""
+
+#: utils/cpupower-set.c:27
+#, c-format
+msgid ""
+" -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
+"policy.\n"
+msgstr ""
+
+#: utils/cpupower-set.c:80
+#, c-format
+msgid "--perf-bias param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:91
+#, c-format
+msgid "--sched-mc param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:102
+#, c-format
+msgid "--sched-smt param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:121
+#, c-format
+msgid "Error setting sched-mc %s\n"
+msgstr ""
+
+#: utils/cpupower-set.c:127
+#, c-format
+msgid "Error setting sched-smt %s\n"
+msgstr ""
+
+#: utils/cpupower-set.c:146
+#, c-format
+msgid "Error setting perf-bias value on CPU %d\n"
+msgstr ""
+
+#: utils/cpupower-info.c:21
+#, c-format
+msgid "Usage: cpupower info [ -b ] [ -m ] [ -s ]\n"
+msgstr ""
+
+#: utils/cpupower-info.c:23
+#, c-format
+msgid ""
+" -b, --perf-bias Gets CPU's power vs performance policy on some\n"
+" Intel models [0-15], see manpage for details\n"
+msgstr ""
+
+#: utils/cpupower-info.c:25
+#, fuzzy, c-format
+msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
+msgstr " -p, --policy Findet die momentane Taktik heraus *\n"
+
+#: utils/cpupower-info.c:26
+#, c-format
+msgid ""
+" -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
+msgstr ""
+
+#: utils/cpupower-info.c:28
+#, c-format
+msgid ""
+"\n"
+"Passing no option will show all info, by default only on core 0\n"
+msgstr ""
+
+#: utils/cpupower-info.c:102
+#, c-format
+msgid "System's multi core scheduler setting: "
+msgstr ""
+
+#. if sysfs file is missing it's: errno == ENOENT
+#: utils/cpupower-info.c:105 utils/cpupower-info.c:114
+#, c-format
+msgid "not supported\n"
+msgstr ""
+
+#: utils/cpupower-info.c:111
+#, c-format
+msgid "System's thread sibling scheduler setting: "
+msgstr ""
+
+#: utils/cpupower-info.c:126
+#, c-format
+msgid "Intel's performance bias setting needs root privileges\n"
+msgstr ""
+
+#: utils/cpupower-info.c:128
+#, c-format
+msgid "System does not support Intel's performance bias setting\n"
+msgstr ""
+
+#: utils/cpupower-info.c:147
+#, c-format
+msgid "Could not read perf-bias value\n"
+msgstr ""
+
+#: utils/cpupower-info.c:150
+#, c-format
+msgid "perf-bias: %d\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:28
+#, fuzzy, c-format
+msgid "Analyzing CPU %d:\n"
+msgstr "analysiere CPU %d:\n"
+
+#: utils/cpuidle-info.c:32
+#, c-format
+msgid "CPU %u: No idle states\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:36
+#, c-format
+msgid "CPU %u: Can't read idle state info\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:41
+#, c-format
+msgid "Could not determine max idle state %u\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:46
+#, c-format
+msgid "Number of idle states: %d\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:48
+#, fuzzy, c-format
+msgid "Available idle states:"
+msgstr " mögliche Taktfrequenzen: "
+
+#: utils/cpuidle-info.c:71
+#, c-format
+msgid "Flags/Description: %s\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:74
+#, c-format
+msgid "Latency: %lu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:76
+#, c-format
+msgid "Usage: %lu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:78
+#, c-format
+msgid "Duration: %llu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:90
+#, c-format
+msgid "Could not determine cpuidle driver\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:94
+#, fuzzy, c-format
+msgid "CPUidle driver: %s\n"
+msgstr " Treiber: %s\n"
+
+#: utils/cpuidle-info.c:99
+#, c-format
+msgid "Could not determine cpuidle governor\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:103
+#, c-format
+msgid "CPUidle governor: %s\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:122
+#, c-format
+msgid "CPU %u: Can't read C-state info\n"
+msgstr ""
+
+#. printf("Cstates: %d\n", cstates);
+#: utils/cpuidle-info.c:127
+#, c-format
+msgid "active state: C0\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:128
+#, c-format
+msgid "max_cstate: C%u\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:129
+#, fuzzy, c-format
+msgid "maximum allowed latency: %lu usec\n"
+msgstr " Maximale Dauer eines Taktfrequenzwechsels: "
+
+#: utils/cpuidle-info.c:130
+#, c-format
+msgid "states:\t\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:132
+#, c-format
+msgid " C%d: type[C%d] "
+msgstr ""
+
+#: utils/cpuidle-info.c:134
+#, c-format
+msgid "promotion[--] demotion[--] "
+msgstr ""
+
+#: utils/cpuidle-info.c:135
+#, c-format
+msgid "latency[%03lu] "
+msgstr ""
+
+#: utils/cpuidle-info.c:137
+#, c-format
+msgid "usage[%08lu] "
+msgstr ""
+
+#: utils/cpuidle-info.c:139
+#, c-format
+msgid "duration[%020Lu] \n"
+msgstr ""
+
+#: utils/cpuidle-info.c:147
+#, fuzzy, c-format
+msgid "Usage: cpupower idleinfo [options]\n"
+msgstr "Aufruf: cpufreq-info [Optionen]\n"
+
+#: utils/cpuidle-info.c:149
+#, fuzzy, c-format
+msgid " -s, --silent Only show general C-state information\n"
+msgstr ""
+" -e, --debug Erzeugt detaillierte Informationen, hilfreich\n"
+" zum Aufspüren von Fehlern\n"
+
+#: utils/cpuidle-info.c:150
+#, fuzzy, c-format
+msgid ""
+" -o, --proc Prints out information like provided by the /proc/"
+"acpi/processor/*/power\n"
+" interface in older kernels\n"
+msgstr ""
+" -o, --proc Erzeugt Informationen in einem ähnlichem Format zu "
+"dem\n"
+" der /proc/cpufreq-Datei in 2.4. und frühen 2.6.\n"
+" Kernel-Versionen\n"
+
+#: utils/cpuidle-info.c:209
+#, fuzzy, c-format
+msgid "You can't specify more than one output-specific argument\n"
+msgstr ""
+"Man kann nicht mehr als einen --cpu-Parameter und/oder mehr als einen\n"
+"informationsspezifischen Parameter gleichzeitig angeben\n"
+
+#~ msgid ""
+#~ " -c CPU, --cpu CPU CPU number which information shall be determined "
+#~ "about\n"
+#~ msgstr ""
+#~ " -c CPU, --cpu CPU Nummer der CPU, über die Informationen "
+#~ "herausgefunden werden sollen\n"
+
+#~ msgid ""
+#~ " -c CPU, --cpu CPU number of CPU where cpufreq settings shall be "
+#~ "modified\n"
+#~ msgstr ""
+#~ " -c CPU, --cpu CPU Nummer der CPU, deren Taktfrequenz-"
+#~ "Einstellung\n"
+#~ " werden soll\n"
diff --git a/tools/power/cpupower/po/fr.po b/tools/power/cpupower/po/fr.po
new file mode 100644
index 00000000000..245ad20a9bf
--- /dev/null
+++ b/tools/power/cpupower/po/fr.po
@@ -0,0 +1,947 @@
+# French translations for cpufrequtils package
+# Copyright (C) 2004 THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the cpufrequtils package.
+# Ducrot Bruno <ducrot@poupinou.org>, 2004.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: cpufrequtils 0.1-pre2\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2011-03-08 17:03+0100\n"
+"PO-Revision-Date: 2004-11-17 15:53+1000\n"
+"Last-Translator: Bruno Ducrot <ducrot@poupinou.org>\n"
+"Language-Team: NONE\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=ISO-8859-1\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: utils/idle_monitor/nhm_idle.c:36
+msgid "Processor Core C3"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:43
+msgid "Processor Core C6"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:51
+msgid "Processor Package C3"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:58 utils/idle_monitor/amd_fam14h_idle.c:70
+msgid "Processor Package C6"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:33
+msgid "Processor Core C7"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:40
+msgid "Processor Package C2"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:47
+msgid "Processor Package C7"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:56
+msgid "Package in sleep state (PC1 or deeper)"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:63
+msgid "Processor Package C1"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:77
+msgid "North Bridge P1 boolean counter (returns 0 or 1)"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:35
+msgid "Processor Core not idle"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:42
+msgid "Processor Core in an idle state"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:50
+msgid "Average Frequency (including boost) in MHz"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:66
+#, c-format
+msgid ""
+"cpupower monitor: [-h] [ [-t] | [-l] | [-m <mon1>,[<mon2>] ] ] [-i "
+"interval_sec | -c command ...]\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:69
+#, c-format
+msgid ""
+"cpupower monitor: [-v] [-h] [ [-t] | [-l] | [-m <mon1>,[<mon2>] ] ] [-i "
+"interval_sec | -c command ...]\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:71
+#, c-format
+msgid "\t -v: be more verbose\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:73
+#, c-format
+msgid "\t -h: print this help\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:74
+#, c-format
+msgid "\t -i: time intervall to measure for in seconds (default 1)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:75
+#, c-format
+msgid "\t -t: show CPU topology/hierarchy\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:76
+#, c-format
+msgid "\t -l: list available CPU sleep monitors (for use with -m)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:77
+#, c-format
+msgid "\t -m: show specific CPU sleep monitors only (in same order)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:79
+#, c-format
+msgid ""
+"only one of: -t, -l, -m are allowed\n"
+"If none of them is passed,"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:80
+#, c-format
+msgid " all supported monitors are shown\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:197
+#, c-format
+msgid "Monitor %s, Counter %s has no count function. Implementation error\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:207
+#, c-format
+msgid " *is offline\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:236
+#, c-format
+msgid "%s: max monitor name length (%d) exceeded\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:250
+#, c-format
+msgid "No matching monitor found in %s, try -l option\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:266
+#, c-format
+msgid "Monitor \"%s\" (%d states) - Might overflow after %u s\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:319
+#, c-format
+msgid "%s took %.5f seconds and exited with status %d\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:406
+#, c-format
+msgid "Cannot read number of available processors\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:417
+#, c-format
+msgid "Available monitor %s needs root access\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:428
+#, c-format
+msgid "No HW Cstate monitors found\n"
+msgstr ""
+
+#: utils/cpupower.c:78
+#, c-format
+msgid "cpupower [ -c cpulist ] subcommand [ARGS]\n"
+msgstr ""
+
+#: utils/cpupower.c:79
+#, c-format
+msgid "cpupower --version\n"
+msgstr ""
+
+#: utils/cpupower.c:80
+#, c-format
+msgid "Supported subcommands are:\n"
+msgstr ""
+
+#: utils/cpupower.c:83
+#, c-format
+msgid ""
+"\n"
+"Some subcommands can make use of the -c cpulist option.\n"
+msgstr ""
+
+#: utils/cpupower.c:84
+#, c-format
+msgid "Look at the general cpupower manpage how to use it\n"
+msgstr ""
+
+#: utils/cpupower.c:85
+#, c-format
+msgid "and read up the subcommand's manpage whether it is supported.\n"
+msgstr ""
+
+#: utils/cpupower.c:86
+#, c-format
+msgid ""
+"\n"
+"Use cpupower help subcommand for getting help for above subcommands.\n"
+msgstr ""
+
+#: utils/cpupower.c:91
+#, c-format
+msgid "Report errors and bugs to %s, please.\n"
+msgstr "Veuillez rapportez les erreurs et les bogues à %s, s'il vous plait.\n"
+
+#: utils/cpupower.c:114
+#, c-format
+msgid "Error parsing cpu list\n"
+msgstr ""
+
+#: utils/cpupower.c:172
+#, c-format
+msgid "Subcommand %s needs root privileges\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:31
+#, c-format
+msgid "Couldn't count the number of CPUs (%s: %s), assuming 1\n"
+msgstr "Détermination du nombre de CPUs (%s : %s) impossible. Assume 1\n"
+
+#: utils/cpufreq-info.c:63
+#, c-format
+msgid ""
+" minimum CPU frequency - maximum CPU frequency - governor\n"
+msgstr ""
+" Fréquence CPU minimale - Fréquence CPU maximale - régulateur\n"
+
+#: utils/cpufreq-info.c:151
+#, c-format
+msgid "Error while evaluating Boost Capabilities on CPU %d -- are you root?\n"
+msgstr ""
+
+#. P state changes via MSR are identified via cpuid 80000007
+#. on Intel and AMD, but we assume boost capable machines can do that
+#. if (cpuid_eax(0x80000000) >= 0x80000007
+#. && (cpuid_edx(0x80000007) & (1 << 7)))
+#.
+#: utils/cpufreq-info.c:161
+#, c-format
+msgid " boost state support: \n"
+msgstr ""
+
+#: utils/cpufreq-info.c:163
+#, c-format
+msgid " Supported: %s\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:163 utils/cpufreq-info.c:164
+msgid "yes"
+msgstr ""
+
+#: utils/cpufreq-info.c:163 utils/cpufreq-info.c:164
+msgid "no"
+msgstr ""
+
+#: utils/cpufreq-info.c:164
+#, fuzzy, c-format
+msgid " Active: %s\n"
+msgstr " pilote : %s\n"
+
+#: utils/cpufreq-info.c:177
+#, c-format
+msgid " Boost States: %d\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:178
+#, c-format
+msgid " Total States: %d\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:181
+#, c-format
+msgid " Pstate-Pb%d: %luMHz (boost state)\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:184
+#, c-format
+msgid " Pstate-P%d: %luMHz\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:211
+#, c-format
+msgid " no or unknown cpufreq driver is active on this CPU\n"
+msgstr " pas de pilotes cpufreq reconnu pour ce CPU\n"
+
+#: utils/cpufreq-info.c:213
+#, c-format
+msgid " driver: %s\n"
+msgstr " pilote : %s\n"
+
+#: utils/cpufreq-info.c:219
+#, fuzzy, c-format
+msgid " CPUs which run at the same hardware frequency: "
+msgstr " CPUs qui doivent changer de fréquences en même temps : "
+
+#: utils/cpufreq-info.c:230
+#, fuzzy, c-format
+msgid " CPUs which need to have their frequency coordinated by software: "
+msgstr " CPUs qui doivent changer de fréquences en même temps : "
+
+#: utils/cpufreq-info.c:241
+#, c-format
+msgid " maximum transition latency: "
+msgstr ""
+
+#: utils/cpufreq-info.c:247
+#, c-format
+msgid " hardware limits: "
+msgstr " limitation matérielle : "
+
+#: utils/cpufreq-info.c:256
+#, c-format
+msgid " available frequency steps: "
+msgstr " plage de fréquence : "
+
+#: utils/cpufreq-info.c:269
+#, c-format
+msgid " available cpufreq governors: "
+msgstr " régulateurs disponibles : "
+
+#: utils/cpufreq-info.c:280
+#, c-format
+msgid " current policy: frequency should be within "
+msgstr " tactique actuelle : la fréquence doit être comprise entre "
+
+#: utils/cpufreq-info.c:282
+#, c-format
+msgid " and "
+msgstr " et "
+
+#: utils/cpufreq-info.c:286
+#, c-format
+msgid ""
+"The governor \"%s\" may decide which speed to use\n"
+" within this range.\n"
+msgstr ""
+"Le régulateur \"%s\" est libre de choisir la vitesse\n"
+" dans cette plage de fréquences.\n"
+
+#: utils/cpufreq-info.c:293
+#, c-format
+msgid " current CPU frequency is "
+msgstr " la fréquence actuelle de ce CPU est "
+
+#: utils/cpufreq-info.c:296
+#, c-format
+msgid " (asserted by call to hardware)"
+msgstr " (vérifié par un appel direct du matériel)"
+
+#: utils/cpufreq-info.c:304
+#, c-format
+msgid " cpufreq stats: "
+msgstr " des statistique concernant cpufreq:"
+
+#: utils/cpufreq-info.c:472
+#, fuzzy, c-format
+msgid "Usage: cpupower freqinfo [options]\n"
+msgstr "Usage : cpufreq-info [options]\n"
+
+#: utils/cpufreq-info.c:473 utils/cpufreq-set.c:26 utils/cpupower-set.c:23
+#: utils/cpupower-info.c:22 utils/cpuidle-info.c:148
+#, c-format
+msgid "Options:\n"
+msgstr "Options :\n"
+
+#: utils/cpufreq-info.c:474
+#, fuzzy, c-format
+msgid " -e, --debug Prints out debug information [default]\n"
+msgstr " -e, --debug Afficher les informations de déboguage\n"
+
+#: utils/cpufreq-info.c:475
+#, c-format
+msgid ""
+" -f, --freq Get frequency the CPU currently runs at, according\n"
+" to the cpufreq core *\n"
+msgstr ""
+" -f, --freq Obtenir la fréquence actuelle du CPU selon le point\n"
+" de vue du coeur du système de cpufreq *\n"
+
+#: utils/cpufreq-info.c:477
+#, c-format
+msgid ""
+" -w, --hwfreq Get frequency the CPU currently runs at, by reading\n"
+" it from hardware (only available to root) *\n"
+msgstr ""
+" -w, --hwfreq Obtenir la fréquence actuelle du CPU directement par\n"
+" le matériel (doit être root) *\n"
+
+#: utils/cpufreq-info.c:479
+#, c-format
+msgid ""
+" -l, --hwlimits Determine the minimum and maximum CPU frequency "
+"allowed *\n"
+msgstr ""
+" -l, --hwlimits Affiche les fréquences minimales et maximales du CPU "
+"*\n"
+
+#: utils/cpufreq-info.c:480
+#, c-format
+msgid " -d, --driver Determines the used cpufreq kernel driver *\n"
+msgstr " -d, --driver Affiche le pilote cpufreq utilisé *\n"
+
+#: utils/cpufreq-info.c:481
+#, c-format
+msgid " -p, --policy Gets the currently used cpufreq policy *\n"
+msgstr " -p, --policy Affiche la tactique actuelle de cpufreq *\n"
+
+#: utils/cpufreq-info.c:482
+#, c-format
+msgid " -g, --governors Determines available cpufreq governors *\n"
+msgstr ""
+" -g, --governors Affiche les régulateurs disponibles de cpufreq *\n"
+
+#: utils/cpufreq-info.c:483
+#, fuzzy, c-format
+msgid ""
+" -r, --related-cpus Determines which CPUs run at the same hardware "
+"frequency *\n"
+msgstr ""
+" -a, --affected-cpus Affiche quels sont les CPUs qui doivent changer de\n"
+" fréquences en même temps *\n"
+
+#: utils/cpufreq-info.c:484
+#, fuzzy, c-format
+msgid ""
+" -a, --affected-cpus Determines which CPUs need to have their frequency\n"
+" coordinated by software *\n"
+msgstr ""
+" -a, --affected-cpus Affiche quels sont les CPUs qui doivent changer de\n"
+" fréquences en même temps *\n"
+
+#: utils/cpufreq-info.c:486
+#, c-format
+msgid " -s, --stats Shows cpufreq statistics if available\n"
+msgstr ""
+" -s, --stats Indique des statistiques concernant cpufreq, si\n"
+" disponibles\n"
+
+#: utils/cpufreq-info.c:487
+#, fuzzy, c-format
+msgid ""
+" -y, --latency Determines the maximum latency on CPU frequency "
+"changes *\n"
+msgstr ""
+" -l, --hwlimits Affiche les fréquences minimales et maximales du CPU "
+"*\n"
+
+#: utils/cpufreq-info.c:488
+#, c-format
+msgid " -b, --boost Checks for turbo or boost modes *\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:489
+#, c-format
+msgid ""
+" -o, --proc Prints out information like provided by the /proc/"
+"cpufreq\n"
+" interface in 2.4. and early 2.6. kernels\n"
+msgstr ""
+" -o, --proc Affiche les informations en utilisant l'interface\n"
+" fournie par /proc/cpufreq, présente dans les "
+"versions\n"
+" 2.4 et les anciennes versions 2.6 du noyau\n"
+
+#: utils/cpufreq-info.c:491
+#, fuzzy, c-format
+msgid ""
+" -m, --human human-readable output for the -f, -w, -s and -y "
+"parameters\n"
+msgstr ""
+" -m, --human affiche dans un format lisible pour un humain\n"
+" pour les options -f, -w et -s (MHz, GHz)\n"
+
+#: utils/cpufreq-info.c:492 utils/cpuidle-info.c:152
+#, c-format
+msgid " -h, --help Prints out this screen\n"
+msgstr " -h, --help affiche l'aide-mémoire\n"
+
+#: utils/cpufreq-info.c:495
+#, c-format
+msgid ""
+"If no argument or only the -c, --cpu parameter is given, debug output about\n"
+"cpufreq is printed which is useful e.g. for reporting bugs.\n"
+msgstr ""
+"Par défaut, les informations de déboguage seront affichées si aucun\n"
+"argument, ou bien si seulement l'argument -c (--cpu) est donné, afin de\n"
+"faciliter les rapports de bogues par exemple\n"
+
+#: utils/cpufreq-info.c:497
+#, c-format
+msgid ""
+"For the arguments marked with *, omitting the -c or --cpu argument is\n"
+"equivalent to setting it to zero\n"
+msgstr "Les arguments avec un * utiliseront le CPU 0 si -c (--cpu) est omis\n"
+
+#: utils/cpufreq-info.c:580
+#, c-format
+msgid ""
+"The argument passed to this tool can't be combined with passing a --cpu "
+"argument\n"
+msgstr "Cette option est incompatible avec --cpu\n"
+
+#: utils/cpufreq-info.c:596
+#, c-format
+msgid ""
+"You can't specify more than one --cpu parameter and/or\n"
+"more than one output-specific argument\n"
+msgstr ""
+"On ne peut indiquer plus d'un paramètre --cpu, tout comme l'on ne peut\n"
+"spécifier plus d'un argument de formatage\n"
+
+#: utils/cpufreq-info.c:600 utils/cpufreq-set.c:82 utils/cpupower-set.c:42
+#: utils/cpupower-info.c:42 utils/cpuidle-info.c:213
+#, c-format
+msgid "invalid or unknown argument\n"
+msgstr "option invalide\n"
+
+#: utils/cpufreq-info.c:617
+#, c-format
+msgid "couldn't analyze CPU %d as it doesn't seem to be present\n"
+msgstr "analyse du CPU %d impossible puisqu'il ne semble pas être présent\n"
+
+#: utils/cpufreq-info.c:620 utils/cpupower-info.c:142
+#, c-format
+msgid "analyzing CPU %d:\n"
+msgstr "analyse du CPU %d :\n"
+
+#: utils/cpufreq-set.c:25
+#, fuzzy, c-format
+msgid "Usage: cpupower frequency-set [options]\n"
+msgstr "Usage : cpufreq-set [options]\n"
+
+#: utils/cpufreq-set.c:27
+#, c-format
+msgid ""
+" -d FREQ, --min FREQ new minimum CPU frequency the governor may "
+"select\n"
+msgstr ""
+" -d FREQ, --min FREQ nouvelle fréquence minimale du CPU à utiliser\n"
+" par le régulateur\n"
+
+#: utils/cpufreq-set.c:28
+#, c-format
+msgid ""
+" -u FREQ, --max FREQ new maximum CPU frequency the governor may "
+"select\n"
+msgstr ""
+" -u FREQ, --max FREQ nouvelle fréquence maximale du CPU à utiliser\n"
+" par le régulateur\n"
+
+#: utils/cpufreq-set.c:29
+#, c-format
+msgid " -g GOV, --governor GOV new cpufreq governor\n"
+msgstr " -g GOV, --governor GOV active le régulateur GOV\n"
+
+#: utils/cpufreq-set.c:30
+#, c-format
+msgid ""
+" -f FREQ, --freq FREQ specific frequency to be set. Requires userspace\n"
+" governor to be available and loaded\n"
+msgstr ""
+" -f FREQ, --freq FREQ fixe la fréquence du processeur à FREQ. Il faut\n"
+" que le régulateur « userspace » soit disponible \n"
+" et activé.\n"
+
+#: utils/cpufreq-set.c:32
+#, c-format
+msgid " -r, --related Switches all hardware-related CPUs\n"
+msgstr ""
+
+#: utils/cpufreq-set.c:33 utils/cpupower-set.c:28 utils/cpupower-info.c:27
+#, fuzzy, c-format
+msgid " -h, --help Prints out this screen\n"
+msgstr " -h, --help affiche l'aide-mémoire\n"
+
+#: utils/cpufreq-set.c:35
+#, fuzzy, c-format
+msgid ""
+"Notes:\n"
+"1. Omitting the -c or --cpu argument is equivalent to setting it to \"all\"\n"
+msgstr "Les arguments avec un * utiliseront le CPU 0 si -c (--cpu) est omis\n"
+
+#: utils/cpufreq-set.c:37
+#, fuzzy, c-format
+msgid ""
+"2. The -f FREQ, --freq FREQ parameter cannot be combined with any other "
+"parameter\n"
+" except the -c CPU, --cpu CPU parameter\n"
+"3. FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz\n"
+" by postfixing the value with the wanted unit name, without any space\n"
+" (FREQuency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n"
+msgstr ""
+"Remarque :\n"
+"1. Le CPU numéro 0 sera utilisé par défaut si -c (ou --cpu) est omis ;\n"
+"2. l'argument -f FREQ (ou --freq FREQ) ne peut être utilisé qu'avec --cpu ;\n"
+"3. on pourra préciser l'unité des fréquences en postfixant sans aucune "
+"espace\n"
+" les valeurs par hz, kHz (par défaut), MHz, GHz ou THz\n"
+" (kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n"
+
+#: utils/cpufreq-set.c:57
+#, c-format
+msgid ""
+"Error setting new values. Common errors:\n"
+"- Do you have proper administration rights? (super-user?)\n"
+"- Is the governor you requested available and modprobed?\n"
+"- Trying to set an invalid policy?\n"
+"- Trying to set a specific frequency, but userspace governor is not "
+"available,\n"
+" for example because of hardware which cannot be set to a specific "
+"frequency\n"
+" or because the userspace governor isn't loaded?\n"
+msgstr ""
+"En ajustant les nouveaux paramètres, une erreur est apparue. Les sources\n"
+"d'erreur typique sont :\n"
+"- droit d'administration insuffisant (êtes-vous root ?) ;\n"
+"- le régulateur choisi n'est pas disponible, ou bien n'est pas disponible "
+"en\n"
+" tant que module noyau ;\n"
+"- la tactique n'est pas disponible ;\n"
+"- vous voulez utiliser l'option -f/--freq, mais le régulateur « userspace »\n"
+" n'est pas disponible, par exemple parce que le matériel ne le supporte\n"
+" pas, ou bien n'est tout simplement pas chargé.\n"
+
+#: utils/cpufreq-set.c:170
+#, c-format
+msgid "wrong, unknown or unhandled CPU?\n"
+msgstr "CPU inconnu ou non supporté ?\n"
+
+#: utils/cpufreq-set.c:302
+#, c-format
+msgid ""
+"the -f/--freq parameter cannot be combined with -d/--min, -u/--max or\n"
+"-g/--governor parameters\n"
+msgstr ""
+"l'option -f/--freq est incompatible avec les options -d/--min, -u/--max et\n"
+"-g/--governor\n"
+
+#: utils/cpufreq-set.c:308
+#, c-format
+msgid ""
+"At least one parameter out of -f/--freq, -d/--min, -u/--max, and\n"
+"-g/--governor must be passed\n"
+msgstr ""
+"L'un de ces paramètres est obligatoire : -f/--freq, -d/--min, -u/--max et\n"
+"-g/--governor\n"
+
+#: utils/cpufreq-set.c:347
+#, c-format
+msgid "Setting cpu: %d\n"
+msgstr ""
+
+#: utils/cpupower-set.c:22
+#, c-format
+msgid "Usage: cpupower set [ -b val ] [ -m val ] [ -s val ]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:24
+#, c-format
+msgid ""
+" -b, --perf-bias [VAL] Sets CPU's power vs performance policy on some\n"
+" Intel models [0-15], see manpage for details\n"
+msgstr ""
+
+#: utils/cpupower-set.c:26
+#, c-format
+msgid ""
+" -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
+msgstr ""
+
+#: utils/cpupower-set.c:27
+#, c-format
+msgid ""
+" -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
+"policy.\n"
+msgstr ""
+
+#: utils/cpupower-set.c:80
+#, c-format
+msgid "--perf-bias param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:91
+#, c-format
+msgid "--sched-mc param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:102
+#, c-format
+msgid "--sched-smt param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:121
+#, c-format
+msgid "Error setting sched-mc %s\n"
+msgstr ""
+
+#: utils/cpupower-set.c:127
+#, c-format
+msgid "Error setting sched-smt %s\n"
+msgstr ""
+
+#: utils/cpupower-set.c:146
+#, c-format
+msgid "Error setting perf-bias value on CPU %d\n"
+msgstr ""
+
+#: utils/cpupower-info.c:21
+#, c-format
+msgid "Usage: cpupower info [ -b ] [ -m ] [ -s ]\n"
+msgstr ""
+
+#: utils/cpupower-info.c:23
+#, c-format
+msgid ""
+" -b, --perf-bias Gets CPU's power vs performance policy on some\n"
+" Intel models [0-15], see manpage for details\n"
+msgstr ""
+
+#: utils/cpupower-info.c:25
+#, fuzzy, c-format
+msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
+msgstr " -p, --policy Affiche la tactique actuelle de cpufreq *\n"
+
+#: utils/cpupower-info.c:26
+#, c-format
+msgid ""
+" -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
+msgstr ""
+
+#: utils/cpupower-info.c:28
+#, c-format
+msgid ""
+"\n"
+"Passing no option will show all info, by default only on core 0\n"
+msgstr ""
+
+#: utils/cpupower-info.c:102
+#, c-format
+msgid "System's multi core scheduler setting: "
+msgstr ""
+
+#. if sysfs file is missing it's: errno == ENOENT
+#: utils/cpupower-info.c:105 utils/cpupower-info.c:114
+#, c-format
+msgid "not supported\n"
+msgstr ""
+
+#: utils/cpupower-info.c:111
+#, c-format
+msgid "System's thread sibling scheduler setting: "
+msgstr ""
+
+#: utils/cpupower-info.c:126
+#, c-format
+msgid "Intel's performance bias setting needs root privileges\n"
+msgstr ""
+
+#: utils/cpupower-info.c:128
+#, c-format
+msgid "System does not support Intel's performance bias setting\n"
+msgstr ""
+
+#: utils/cpupower-info.c:147
+#, c-format
+msgid "Could not read perf-bias value\n"
+msgstr ""
+
+#: utils/cpupower-info.c:150
+#, c-format
+msgid "perf-bias: %d\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:28
+#, fuzzy, c-format
+msgid "Analyzing CPU %d:\n"
+msgstr "analyse du CPU %d :\n"
+
+#: utils/cpuidle-info.c:32
+#, c-format
+msgid "CPU %u: No idle states\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:36
+#, c-format
+msgid "CPU %u: Can't read idle state info\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:41
+#, c-format
+msgid "Could not determine max idle state %u\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:46
+#, c-format
+msgid "Number of idle states: %d\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:48
+#, fuzzy, c-format
+msgid "Available idle states:"
+msgstr " plage de fréquence : "
+
+#: utils/cpuidle-info.c:71
+#, c-format
+msgid "Flags/Description: %s\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:74
+#, c-format
+msgid "Latency: %lu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:76
+#, c-format
+msgid "Usage: %lu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:78
+#, c-format
+msgid "Duration: %llu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:90
+#, c-format
+msgid "Could not determine cpuidle driver\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:94
+#, fuzzy, c-format
+msgid "CPUidle driver: %s\n"
+msgstr " pilote : %s\n"
+
+#: utils/cpuidle-info.c:99
+#, c-format
+msgid "Could not determine cpuidle governor\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:103
+#, c-format
+msgid "CPUidle governor: %s\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:122
+#, c-format
+msgid "CPU %u: Can't read C-state info\n"
+msgstr ""
+
+#. printf("Cstates: %d\n", cstates);
+#: utils/cpuidle-info.c:127
+#, c-format
+msgid "active state: C0\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:128
+#, c-format
+msgid "max_cstate: C%u\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:129
+#, c-format
+msgid "maximum allowed latency: %lu usec\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:130
+#, c-format
+msgid "states:\t\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:132
+#, c-format
+msgid " C%d: type[C%d] "
+msgstr ""
+
+#: utils/cpuidle-info.c:134
+#, c-format
+msgid "promotion[--] demotion[--] "
+msgstr ""
+
+#: utils/cpuidle-info.c:135
+#, c-format
+msgid "latency[%03lu] "
+msgstr ""
+
+#: utils/cpuidle-info.c:137
+#, c-format
+msgid "usage[%08lu] "
+msgstr ""
+
+#: utils/cpuidle-info.c:139
+#, c-format
+msgid "duration[%020Lu] \n"
+msgstr ""
+
+#: utils/cpuidle-info.c:147
+#, fuzzy, c-format
+msgid "Usage: cpupower idleinfo [options]\n"
+msgstr "Usage : cpufreq-info [options]\n"
+
+#: utils/cpuidle-info.c:149
+#, fuzzy, c-format
+msgid " -s, --silent Only show general C-state information\n"
+msgstr " -e, --debug Afficher les informations de déboguage\n"
+
+#: utils/cpuidle-info.c:150
+#, fuzzy, c-format
+msgid ""
+" -o, --proc Prints out information like provided by the /proc/"
+"acpi/processor/*/power\n"
+" interface in older kernels\n"
+msgstr ""
+" -o, --proc Affiche les informations en utilisant l'interface\n"
+" fournie par /proc/cpufreq, présente dans les "
+"versions\n"
+" 2.4 et les anciennes versions 2.6 du noyau\n"
+
+#: utils/cpuidle-info.c:209
+#, fuzzy, c-format
+msgid "You can't specify more than one output-specific argument\n"
+msgstr ""
+"On ne peut indiquer plus d'un paramètre --cpu, tout comme l'on ne peut\n"
+"spécifier plus d'un argument de formatage\n"
+
+#~ msgid ""
+#~ " -c CPU, --cpu CPU CPU number which information shall be determined "
+#~ "about\n"
+#~ msgstr ""
+#~ " -c CPU, --cpu CPU Numéro du CPU pour lequel l'information sera "
+#~ "affichée\n"
+
+#~ msgid ""
+#~ " -c CPU, --cpu CPU number of CPU where cpufreq settings shall be "
+#~ "modified\n"
+#~ msgstr ""
+#~ " -c CPU, --cpu CPU numéro du CPU à prendre en compte pour les\n"
+#~ " changements\n"
diff --git a/tools/power/cpupower/po/it.po b/tools/power/cpupower/po/it.po
new file mode 100644
index 00000000000..f80c4ddb9bd
--- /dev/null
+++ b/tools/power/cpupower/po/it.po
@@ -0,0 +1,961 @@
+# Italian translations for cpufrequtils package
+# Copyright (C) 2004-2009
+# This file is distributed under the same license as the cpufrequtils package.
+# Mattia Dongili <malattia@gmail.com>.
+#
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cpufrequtils 0.3\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2011-03-08 17:03+0100\n"
+"PO-Revision-Date: 2009-08-15 12:00+0900\n"
+"Last-Translator: Mattia Dongili <malattia@gmail.com>\n"
+"Language-Team: NONE\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: utils/idle_monitor/nhm_idle.c:36
+msgid "Processor Core C3"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:43
+msgid "Processor Core C6"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:51
+msgid "Processor Package C3"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:58 utils/idle_monitor/amd_fam14h_idle.c:70
+msgid "Processor Package C6"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:33
+msgid "Processor Core C7"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:40
+msgid "Processor Package C2"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:47
+msgid "Processor Package C7"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:56
+msgid "Package in sleep state (PC1 or deeper)"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:63
+msgid "Processor Package C1"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:77
+msgid "North Bridge P1 boolean counter (returns 0 or 1)"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:35
+msgid "Processor Core not idle"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:42
+msgid "Processor Core in an idle state"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:50
+msgid "Average Frequency (including boost) in MHz"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:66
+#, c-format
+msgid ""
+"cpupower monitor: [-h] [ [-t] | [-l] | [-m <mon1>,[<mon2>] ] ] [-i "
+"interval_sec | -c command ...]\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:69
+#, c-format
+msgid ""
+"cpupower monitor: [-v] [-h] [ [-t] | [-l] | [-m <mon1>,[<mon2>] ] ] [-i "
+"interval_sec | -c command ...]\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:71
+#, c-format
+msgid "\t -v: be more verbose\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:73
+#, c-format
+msgid "\t -h: print this help\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:74
+#, c-format
+msgid "\t -i: time intervall to measure for in seconds (default 1)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:75
+#, c-format
+msgid "\t -t: show CPU topology/hierarchy\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:76
+#, c-format
+msgid "\t -l: list available CPU sleep monitors (for use with -m)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:77
+#, c-format
+msgid "\t -m: show specific CPU sleep monitors only (in same order)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:79
+#, c-format
+msgid ""
+"only one of: -t, -l, -m are allowed\n"
+"If none of them is passed,"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:80
+#, c-format
+msgid " all supported monitors are shown\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:197
+#, c-format
+msgid "Monitor %s, Counter %s has no count function. Implementation error\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:207
+#, c-format
+msgid " *is offline\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:236
+#, c-format
+msgid "%s: max monitor name length (%d) exceeded\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:250
+#, c-format
+msgid "No matching monitor found in %s, try -l option\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:266
+#, c-format
+msgid "Monitor \"%s\" (%d states) - Might overflow after %u s\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:319
+#, c-format
+msgid "%s took %.5f seconds and exited with status %d\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:406
+#, c-format
+msgid "Cannot read number of available processors\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:417
+#, c-format
+msgid "Available monitor %s needs root access\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:428
+#, c-format
+msgid "No HW Cstate monitors found\n"
+msgstr ""
+
+#: utils/cpupower.c:78
+#, c-format
+msgid "cpupower [ -c cpulist ] subcommand [ARGS]\n"
+msgstr ""
+
+#: utils/cpupower.c:79
+#, c-format
+msgid "cpupower --version\n"
+msgstr ""
+
+#: utils/cpupower.c:80
+#, c-format
+msgid "Supported subcommands are:\n"
+msgstr ""
+
+#: utils/cpupower.c:83
+#, c-format
+msgid ""
+"\n"
+"Some subcommands can make use of the -c cpulist option.\n"
+msgstr ""
+
+#: utils/cpupower.c:84
+#, c-format
+msgid "Look at the general cpupower manpage how to use it\n"
+msgstr ""
+
+#: utils/cpupower.c:85
+#, c-format
+msgid "and read up the subcommand's manpage whether it is supported.\n"
+msgstr ""
+
+#: utils/cpupower.c:86
+#, c-format
+msgid ""
+"\n"
+"Use cpupower help subcommand for getting help for above subcommands.\n"
+msgstr ""
+
+#: utils/cpupower.c:91
+#, c-format
+msgid "Report errors and bugs to %s, please.\n"
+msgstr "Per favore, comunicare errori e malfunzionamenti a %s.\n"
+
+#: utils/cpupower.c:114
+#, c-format
+msgid "Error parsing cpu list\n"
+msgstr ""
+
+#: utils/cpupower.c:172
+#, c-format
+msgid "Subcommand %s needs root privileges\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:31
+#, c-format
+msgid "Couldn't count the number of CPUs (%s: %s), assuming 1\n"
+msgstr "Impossibile determinare il numero di CPU (%s: %s), assumo sia 1\n"
+
+#: utils/cpufreq-info.c:63
+#, c-format
+msgid ""
+" minimum CPU frequency - maximum CPU frequency - governor\n"
+msgstr ""
+" frequenza minima CPU - frequenza massima CPU - gestore\n"
+
+#: utils/cpufreq-info.c:151
+#, c-format
+msgid "Error while evaluating Boost Capabilities on CPU %d -- are you root?\n"
+msgstr ""
+
+#. P state changes via MSR are identified via cpuid 80000007
+#. on Intel and AMD, but we assume boost capable machines can do that
+#. if (cpuid_eax(0x80000000) >= 0x80000007
+#. && (cpuid_edx(0x80000007) & (1 << 7)))
+#.
+#: utils/cpufreq-info.c:161
+#, c-format
+msgid " boost state support: \n"
+msgstr ""
+
+#: utils/cpufreq-info.c:163
+#, c-format
+msgid " Supported: %s\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:163 utils/cpufreq-info.c:164
+msgid "yes"
+msgstr ""
+
+#: utils/cpufreq-info.c:163 utils/cpufreq-info.c:164
+msgid "no"
+msgstr ""
+
+#: utils/cpufreq-info.c:164
+#, fuzzy, c-format
+msgid " Active: %s\n"
+msgstr " modulo %s\n"
+
+#: utils/cpufreq-info.c:177
+#, c-format
+msgid " Boost States: %d\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:178
+#, c-format
+msgid " Total States: %d\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:181
+#, c-format
+msgid " Pstate-Pb%d: %luMHz (boost state)\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:184
+#, c-format
+msgid " Pstate-P%d: %luMHz\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:211
+#, c-format
+msgid " no or unknown cpufreq driver is active on this CPU\n"
+msgstr " nessun modulo o modulo cpufreq sconosciuto per questa CPU\n"
+
+#: utils/cpufreq-info.c:213
+#, c-format
+msgid " driver: %s\n"
+msgstr " modulo %s\n"
+
+#: utils/cpufreq-info.c:219
+#, c-format
+msgid " CPUs which run at the same hardware frequency: "
+msgstr " CPU che operano alla stessa frequenza hardware: "
+
+#: utils/cpufreq-info.c:230
+#, c-format
+msgid " CPUs which need to have their frequency coordinated by software: "
+msgstr " CPU che è necessario siano coordinate dal software: "
+
+#: utils/cpufreq-info.c:241
+#, c-format
+msgid " maximum transition latency: "
+msgstr " latenza massima durante la transizione: "
+
+#: utils/cpufreq-info.c:247
+#, c-format
+msgid " hardware limits: "
+msgstr " limiti hardware: "
+
+#: utils/cpufreq-info.c:256
+#, c-format
+msgid " available frequency steps: "
+msgstr " frequenze disponibili: "
+
+#: utils/cpufreq-info.c:269
+#, c-format
+msgid " available cpufreq governors: "
+msgstr " gestori disponibili: "
+
+#: utils/cpufreq-info.c:280
+#, c-format
+msgid " current policy: frequency should be within "
+msgstr " gestore attuale: la frequenza deve mantenersi tra "
+
+#: utils/cpufreq-info.c:282
+#, c-format
+msgid " and "
+msgstr " e "
+
+#: utils/cpufreq-info.c:286
+#, c-format
+msgid ""
+"The governor \"%s\" may decide which speed to use\n"
+" within this range.\n"
+msgstr ""
+" Il gestore \"%s\" può decidere quale velocità usare\n"
+" in questo intervallo.\n"
+
+#: utils/cpufreq-info.c:293
+#, c-format
+msgid " current CPU frequency is "
+msgstr " la frequenza attuale della CPU è "
+
+#: utils/cpufreq-info.c:296
+#, c-format
+msgid " (asserted by call to hardware)"
+msgstr " (ottenuta da una chiamata diretta all'hardware)"
+
+#: utils/cpufreq-info.c:304
+#, c-format
+msgid " cpufreq stats: "
+msgstr " statistiche cpufreq:"
+
+#: utils/cpufreq-info.c:472
+#, fuzzy, c-format
+msgid "Usage: cpupower freqinfo [options]\n"
+msgstr "Uso: cpufreq-info [opzioni]\n"
+
+#: utils/cpufreq-info.c:473 utils/cpufreq-set.c:26 utils/cpupower-set.c:23
+#: utils/cpupower-info.c:22 utils/cpuidle-info.c:148
+#, c-format
+msgid "Options:\n"
+msgstr "Opzioni:\n"
+
+#: utils/cpufreq-info.c:474
+#, fuzzy, c-format
+msgid " -e, --debug Prints out debug information [default]\n"
+msgstr " -e, --debug Mostra informazioni di debug\n"
+
+#: utils/cpufreq-info.c:475
+#, c-format
+msgid ""
+" -f, --freq Get frequency the CPU currently runs at, according\n"
+" to the cpufreq core *\n"
+msgstr ""
+" -f, --freq Mostra la frequenza attuale della CPU secondo\n"
+" il modulo cpufreq *\n"
+
+#: utils/cpufreq-info.c:477
+#, c-format
+msgid ""
+" -w, --hwfreq Get frequency the CPU currently runs at, by reading\n"
+" it from hardware (only available to root) *\n"
+msgstr ""
+" -w, --hwfreq Mostra la frequenza attuale della CPU leggendola\n"
+" dall'hardware (disponibile solo per l'utente root) *\n"
+
+#: utils/cpufreq-info.c:479
+#, c-format
+msgid ""
+" -l, --hwlimits Determine the minimum and maximum CPU frequency "
+"allowed *\n"
+msgstr ""
+" -l, --hwlimits Determina le frequenze minima e massima possibili per "
+"la CPU *\n"
+
+#: utils/cpufreq-info.c:480
+#, c-format
+msgid " -d, --driver Determines the used cpufreq kernel driver *\n"
+msgstr ""
+" -d, --driver Determina il modulo cpufreq del kernel in uso *\n"
+
+#: utils/cpufreq-info.c:481
+#, c-format
+msgid " -p, --policy Gets the currently used cpufreq policy *\n"
+msgstr ""
+" -p, --policy Mostra il gestore cpufreq attualmente in uso *\n"
+
+#: utils/cpufreq-info.c:482
+#, c-format
+msgid " -g, --governors Determines available cpufreq governors *\n"
+msgstr " -g, --governors Determina i gestori cpufreq disponibili *\n"
+
+#: utils/cpufreq-info.c:483
+#, c-format
+msgid ""
+" -r, --related-cpus Determines which CPUs run at the same hardware "
+"frequency *\n"
+msgstr ""
+" -r, --related-cpus Determina quali CPU operano alla stessa frequenza *\n"
+
+#: utils/cpufreq-info.c:484
+#, c-format
+msgid ""
+" -a, --affected-cpus Determines which CPUs need to have their frequency\n"
+" coordinated by software *\n"
+msgstr ""
+" -a, --affected-cpus Determina quali CPU devono avere la frequenza\n"
+" coordinata dal software *\n"
+
+#: utils/cpufreq-info.c:486
+#, c-format
+msgid " -s, --stats Shows cpufreq statistics if available\n"
+msgstr " -s, --stats Mostra le statistiche se disponibili\n"
+
+#: utils/cpufreq-info.c:487
+#, c-format
+msgid ""
+" -y, --latency Determines the maximum latency on CPU frequency "
+"changes *\n"
+msgstr ""
+" -y, --latency Determina la latenza massima durante i cambi di "
+"frequenza *\n"
+
+#: utils/cpufreq-info.c:488
+#, c-format
+msgid " -b, --boost Checks for turbo or boost modes *\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:489
+#, c-format
+msgid ""
+" -o, --proc Prints out information like provided by the /proc/"
+"cpufreq\n"
+" interface in 2.4. and early 2.6. kernels\n"
+msgstr ""
+" -o, --proc Stampa le informazioni come se provenissero dalla\n"
+" interfaccia cpufreq /proc/ presente nei kernel\n"
+" 2.4 ed i primi 2.6\n"
+
+#: utils/cpufreq-info.c:491
+#, c-format
+msgid ""
+" -m, --human human-readable output for the -f, -w, -s and -y "
+"parameters\n"
+msgstr ""
+" -m, --human formatta l'output delle opzioni -f, -w, -s e -y in "
+"maniera\n"
+" leggibile da un essere umano\n"
+
+#: utils/cpufreq-info.c:492 utils/cpuidle-info.c:152
+#, c-format
+msgid " -h, --help Prints out this screen\n"
+msgstr " -h, --help Stampa questa schermata\n"
+
+#: utils/cpufreq-info.c:495
+#, c-format
+msgid ""
+"If no argument or only the -c, --cpu parameter is given, debug output about\n"
+"cpufreq is printed which is useful e.g. for reporting bugs.\n"
+msgstr ""
+"Se non viene specificata nessuna opzione o viene specificata solo l'opzione -"
+"c, --cpu,\n"
+"le informazioni di debug per cpufreq saranno utili ad esempio a riportare i "
+"bug.\n"
+
+#: utils/cpufreq-info.c:497
+#, c-format
+msgid ""
+"For the arguments marked with *, omitting the -c or --cpu argument is\n"
+"equivalent to setting it to zero\n"
+msgstr ""
+"Per le opzioni segnalate con *, omettere l'opzione -c o --cpu è come "
+"specificarla\n"
+"con il valore 0\n"
+
+#: utils/cpufreq-info.c:580
+#, c-format
+msgid ""
+"The argument passed to this tool can't be combined with passing a --cpu "
+"argument\n"
+msgstr ""
+"L'opzione specificata a questo programma non può essere combinata con --cpu\n"
+
+#: utils/cpufreq-info.c:596
+#, c-format
+msgid ""
+"You can't specify more than one --cpu parameter and/or\n"
+"more than one output-specific argument\n"
+msgstr ""
+"Non è possibile specificare più di una volta l'opzione --cpu e/o\n"
+"specificare più di un parametro di output specifico\n"
+
+#: utils/cpufreq-info.c:600 utils/cpufreq-set.c:82 utils/cpupower-set.c:42
+#: utils/cpupower-info.c:42 utils/cpuidle-info.c:213
+#, c-format
+msgid "invalid or unknown argument\n"
+msgstr "opzione sconosciuta o non valida\n"
+
+#: utils/cpufreq-info.c:617
+#, c-format
+msgid "couldn't analyze CPU %d as it doesn't seem to be present\n"
+msgstr "impossibile analizzare la CPU %d poiché non sembra essere presente\n"
+
+#: utils/cpufreq-info.c:620 utils/cpupower-info.c:142
+#, c-format
+msgid "analyzing CPU %d:\n"
+msgstr "analisi della CPU %d:\n"
+
+#: utils/cpufreq-set.c:25
+#, fuzzy, c-format
+msgid "Usage: cpupower frequency-set [options]\n"
+msgstr "Uso: cpufreq-set [opzioni]\n"
+
+#: utils/cpufreq-set.c:27
+#, c-format
+msgid ""
+" -d FREQ, --min FREQ new minimum CPU frequency the governor may "
+"select\n"
+msgstr ""
+" -d FREQ, --min FREQ la nuova frequenza minima che il gestore cpufreq "
+"può scegliere\n"
+
+#: utils/cpufreq-set.c:28
+#, c-format
+msgid ""
+" -u FREQ, --max FREQ new maximum CPU frequency the governor may "
+"select\n"
+msgstr ""
+" -u FREQ, --max FREQ la nuova frequenza massima che il gestore cpufreq "
+"può scegliere\n"
+
+#: utils/cpufreq-set.c:29
+#, c-format
+msgid " -g GOV, --governor GOV new cpufreq governor\n"
+msgstr " -g GOV, --governor GOV nuovo gestore cpufreq\n"
+
+#: utils/cpufreq-set.c:30
+#, c-format
+msgid ""
+" -f FREQ, --freq FREQ specific frequency to be set. Requires userspace\n"
+" governor to be available and loaded\n"
+msgstr ""
+" -f FREQ, --freq FREQ specifica la frequenza a cui impostare la CPU.\n"
+" È necessario che il gestore userspace sia "
+"disponibile e caricato\n"
+
+#: utils/cpufreq-set.c:32
+#, c-format
+msgid " -r, --related Switches all hardware-related CPUs\n"
+msgstr ""
+" -r, --related Modifica tutte le CPU coordinate dall'hardware\n"
+
+#: utils/cpufreq-set.c:33 utils/cpupower-set.c:28 utils/cpupower-info.c:27
+#, c-format
+msgid " -h, --help Prints out this screen\n"
+msgstr " -h, --help Stampa questa schermata\n"
+
+#: utils/cpufreq-set.c:35
+#, fuzzy, c-format
+msgid ""
+"Notes:\n"
+"1. Omitting the -c or --cpu argument is equivalent to setting it to \"all\"\n"
+msgstr ""
+"Per le opzioni segnalate con *, omettere l'opzione -c o --cpu è come "
+"specificarla\n"
+"con il valore 0\n"
+
+#: utils/cpufreq-set.c:37
+#, fuzzy, c-format
+msgid ""
+"2. The -f FREQ, --freq FREQ parameter cannot be combined with any other "
+"parameter\n"
+" except the -c CPU, --cpu CPU parameter\n"
+"3. FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz\n"
+" by postfixing the value with the wanted unit name, without any space\n"
+" (FREQuency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n"
+msgstr ""
+"Note:\n"
+"1. Omettere l'opzione -c o --cpu è equivalente a impostarlo a 0\n"
+"2. l'opzione -f FREQ, --freq FREQ non può essere specificata con altre "
+"opzioni\n"
+" ad eccezione dell'opzione -c CPU o --cpu CPU\n"
+"3. le FREQuenze possono essere specuficate in Hz, kHz (default), MHz, GHz, "
+"or THz\n"
+" postponendo l'unità di misura al valore senza nessuno spazio fra loro\n"
+" (FREQuenza in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n"
+
+#: utils/cpufreq-set.c:57
+#, c-format
+msgid ""
+"Error setting new values. Common errors:\n"
+"- Do you have proper administration rights? (super-user?)\n"
+"- Is the governor you requested available and modprobed?\n"
+"- Trying to set an invalid policy?\n"
+"- Trying to set a specific frequency, but userspace governor is not "
+"available,\n"
+" for example because of hardware which cannot be set to a specific "
+"frequency\n"
+" or because the userspace governor isn't loaded?\n"
+msgstr ""
+"Si sono verificati degli errori impostando i nuovi valori.\n"
+"Alcuni errori comuni possono essere:\n"
+"- Hai i necessari diritti di amministrazione? (super-user?)\n"
+"- Il gestore che hai richiesto è disponibile e caricato?\n"
+"- Stai provando ad impostare una politica di gestione non valida?\n"
+"- Stai provando a impostare una specifica frequenza ma il gestore\n"
+" userspace non è disponibile, per esempio a causa dell'hardware\n"
+" che non supporta frequenze fisse o a causa del fatto che\n"
+" il gestore userspace non è caricato?\n"
+
+#: utils/cpufreq-set.c:170
+#, c-format
+msgid "wrong, unknown or unhandled CPU?\n"
+msgstr "CPU errata, sconosciuta o non gestita?\n"
+
+#: utils/cpufreq-set.c:302
+#, c-format
+msgid ""
+"the -f/--freq parameter cannot be combined with -d/--min, -u/--max or\n"
+"-g/--governor parameters\n"
+msgstr ""
+"l'opzione -f/--freq non può venire combinata con i parametri\n"
+" -d/--min, -u/--max o -g/--governor\n"
+
+#: utils/cpufreq-set.c:308
+#, c-format
+msgid ""
+"At least one parameter out of -f/--freq, -d/--min, -u/--max, and\n"
+"-g/--governor must be passed\n"
+msgstr ""
+"Almeno una delle opzioni -f/--freq, -d/--min, -u/--max, e -g/--governor\n"
+"deve essere specificata\n"
+
+#: utils/cpufreq-set.c:347
+#, c-format
+msgid "Setting cpu: %d\n"
+msgstr ""
+
+#: utils/cpupower-set.c:22
+#, c-format
+msgid "Usage: cpupower set [ -b val ] [ -m val ] [ -s val ]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:24
+#, c-format
+msgid ""
+" -b, --perf-bias [VAL] Sets CPU's power vs performance policy on some\n"
+" Intel models [0-15], see manpage for details\n"
+msgstr ""
+
+#: utils/cpupower-set.c:26
+#, c-format
+msgid ""
+" -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
+msgstr ""
+
+#: utils/cpupower-set.c:27
+#, c-format
+msgid ""
+" -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
+"policy.\n"
+msgstr ""
+
+#: utils/cpupower-set.c:80
+#, c-format
+msgid "--perf-bias param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:91
+#, c-format
+msgid "--sched-mc param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:102
+#, c-format
+msgid "--sched-smt param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:121
+#, c-format
+msgid "Error setting sched-mc %s\n"
+msgstr ""
+
+#: utils/cpupower-set.c:127
+#, c-format
+msgid "Error setting sched-smt %s\n"
+msgstr ""
+
+#: utils/cpupower-set.c:146
+#, c-format
+msgid "Error setting perf-bias value on CPU %d\n"
+msgstr ""
+
+#: utils/cpupower-info.c:21
+#, c-format
+msgid "Usage: cpupower info [ -b ] [ -m ] [ -s ]\n"
+msgstr ""
+
+#: utils/cpupower-info.c:23
+#, c-format
+msgid ""
+" -b, --perf-bias Gets CPU's power vs performance policy on some\n"
+" Intel models [0-15], see manpage for details\n"
+msgstr ""
+
+#: utils/cpupower-info.c:25
+#, fuzzy, c-format
+msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
+msgstr ""
+" -p, --policy Mostra il gestore cpufreq attualmente in uso *\n"
+
+#: utils/cpupower-info.c:26
+#, c-format
+msgid ""
+" -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
+msgstr ""
+
+#: utils/cpupower-info.c:28
+#, c-format
+msgid ""
+"\n"
+"Passing no option will show all info, by default only on core 0\n"
+msgstr ""
+
+#: utils/cpupower-info.c:102
+#, c-format
+msgid "System's multi core scheduler setting: "
+msgstr ""
+
+#. if sysfs file is missing it's: errno == ENOENT
+#: utils/cpupower-info.c:105 utils/cpupower-info.c:114
+#, c-format
+msgid "not supported\n"
+msgstr ""
+
+#: utils/cpupower-info.c:111
+#, c-format
+msgid "System's thread sibling scheduler setting: "
+msgstr ""
+
+#: utils/cpupower-info.c:126
+#, c-format
+msgid "Intel's performance bias setting needs root privileges\n"
+msgstr ""
+
+#: utils/cpupower-info.c:128
+#, c-format
+msgid "System does not support Intel's performance bias setting\n"
+msgstr ""
+
+#: utils/cpupower-info.c:147
+#, c-format
+msgid "Could not read perf-bias value\n"
+msgstr ""
+
+#: utils/cpupower-info.c:150
+#, c-format
+msgid "perf-bias: %d\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:28
+#, fuzzy, c-format
+msgid "Analyzing CPU %d:\n"
+msgstr "analisi della CPU %d:\n"
+
+#: utils/cpuidle-info.c:32
+#, c-format
+msgid "CPU %u: No idle states\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:36
+#, c-format
+msgid "CPU %u: Can't read idle state info\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:41
+#, c-format
+msgid "Could not determine max idle state %u\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:46
+#, c-format
+msgid "Number of idle states: %d\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:48
+#, fuzzy, c-format
+msgid "Available idle states:"
+msgstr " frequenze disponibili: "
+
+#: utils/cpuidle-info.c:71
+#, c-format
+msgid "Flags/Description: %s\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:74
+#, c-format
+msgid "Latency: %lu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:76
+#, c-format
+msgid "Usage: %lu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:78
+#, c-format
+msgid "Duration: %llu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:90
+#, c-format
+msgid "Could not determine cpuidle driver\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:94
+#, fuzzy, c-format
+msgid "CPUidle driver: %s\n"
+msgstr " modulo %s\n"
+
+#: utils/cpuidle-info.c:99
+#, c-format
+msgid "Could not determine cpuidle governor\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:103
+#, c-format
+msgid "CPUidle governor: %s\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:122
+#, c-format
+msgid "CPU %u: Can't read C-state info\n"
+msgstr ""
+
+#. printf("Cstates: %d\n", cstates);
+#: utils/cpuidle-info.c:127
+#, c-format
+msgid "active state: C0\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:128
+#, c-format
+msgid "max_cstate: C%u\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:129
+#, fuzzy, c-format
+msgid "maximum allowed latency: %lu usec\n"
+msgstr " latenza massima durante la transizione: "
+
+#: utils/cpuidle-info.c:130
+#, c-format
+msgid "states:\t\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:132
+#, c-format
+msgid " C%d: type[C%d] "
+msgstr ""
+
+#: utils/cpuidle-info.c:134
+#, c-format
+msgid "promotion[--] demotion[--] "
+msgstr ""
+
+#: utils/cpuidle-info.c:135
+#, c-format
+msgid "latency[%03lu] "
+msgstr ""
+
+#: utils/cpuidle-info.c:137
+#, c-format
+msgid "usage[%08lu] "
+msgstr ""
+
+#: utils/cpuidle-info.c:139
+#, c-format
+msgid "duration[%020Lu] \n"
+msgstr ""
+
+#: utils/cpuidle-info.c:147
+#, fuzzy, c-format
+msgid "Usage: cpupower idleinfo [options]\n"
+msgstr "Uso: cpufreq-info [opzioni]\n"
+
+#: utils/cpuidle-info.c:149
+#, fuzzy, c-format
+msgid " -s, --silent Only show general C-state information\n"
+msgstr " -e, --debug Mostra informazioni di debug\n"
+
+#: utils/cpuidle-info.c:150
+#, fuzzy, c-format
+msgid ""
+" -o, --proc Prints out information like provided by the /proc/"
+"acpi/processor/*/power\n"
+" interface in older kernels\n"
+msgstr ""
+" -o, --proc Stampa le informazioni come se provenissero dalla\n"
+" interfaccia cpufreq /proc/ presente nei kernel\n"
+" 2.4 ed i primi 2.6\n"
+
+#: utils/cpuidle-info.c:209
+#, fuzzy, c-format
+msgid "You can't specify more than one output-specific argument\n"
+msgstr ""
+"Non è possibile specificare più di una volta l'opzione --cpu e/o\n"
+"specificare più di un parametro di output specifico\n"
+
+#~ msgid ""
+#~ " -c CPU, --cpu CPU CPU number which information shall be determined "
+#~ "about\n"
+#~ msgstr ""
+#~ " -c CPU, --cpu CPU Numero di CPU per la quale ottenere le "
+#~ "informazioni\n"
+
+#~ msgid ""
+#~ " -c CPU, --cpu CPU number of CPU where cpufreq settings shall be "
+#~ "modified\n"
+#~ msgstr ""
+#~ " -c CPU, --cpu CPU numero di CPU per la quale modificare le "
+#~ "impostazioni\n"
+
+#, fuzzy
+#~ msgid " CPUs which coordinate software frequency requirements: "
+#~ msgstr ""
+#~ " CPU per le quali e` necessario cambiare la frequenza "
+#~ "contemporaneamente: "
diff --git a/tools/power/cpupower/po/pt.po b/tools/power/cpupower/po/pt.po
new file mode 100644
index 00000000000..990f5267ffe
--- /dev/null
+++ b/tools/power/cpupower/po/pt.po
@@ -0,0 +1,957 @@
+# Brazilian Portuguese translations for cpufrequtils package
+# Copyright (C) 2008 THE cpufrequtils'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the cpufrequtils package.
+# Claudio Eduardo <claudioeddy@gmail.com>, 2009.
+#
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: cpufrequtils 004\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2011-03-08 17:03+0100\n"
+"PO-Revision-Date: 2008-06-14 22:16-0400\n"
+"Last-Translator: Claudio Eduardo <claudioeddy@gmail.com>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: utils/idle_monitor/nhm_idle.c:36
+msgid "Processor Core C3"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:43
+msgid "Processor Core C6"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:51
+msgid "Processor Package C3"
+msgstr ""
+
+#: utils/idle_monitor/nhm_idle.c:58 utils/idle_monitor/amd_fam14h_idle.c:70
+msgid "Processor Package C6"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:33
+msgid "Processor Core C7"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:40
+msgid "Processor Package C2"
+msgstr ""
+
+#: utils/idle_monitor/snb_idle.c:47
+msgid "Processor Package C7"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:56
+msgid "Package in sleep state (PC1 or deeper)"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:63
+msgid "Processor Package C1"
+msgstr ""
+
+#: utils/idle_monitor/amd_fam14h_idle.c:77
+msgid "North Bridge P1 boolean counter (returns 0 or 1)"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:35
+msgid "Processor Core not idle"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:42
+msgid "Processor Core in an idle state"
+msgstr ""
+
+#: utils/idle_monitor/mperf_monitor.c:50
+msgid "Average Frequency (including boost) in MHz"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:66
+#, c-format
+msgid ""
+"cpupower monitor: [-h] [ [-t] | [-l] | [-m <mon1>,[<mon2>] ] ] [-i "
+"interval_sec | -c command ...]\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:69
+#, c-format
+msgid ""
+"cpupower monitor: [-v] [-h] [ [-t] | [-l] | [-m <mon1>,[<mon2>] ] ] [-i "
+"interval_sec | -c command ...]\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:71
+#, c-format
+msgid "\t -v: be more verbose\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:73
+#, c-format
+msgid "\t -h: print this help\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:74
+#, c-format
+msgid "\t -i: time intervall to measure for in seconds (default 1)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:75
+#, c-format
+msgid "\t -t: show CPU topology/hierarchy\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:76
+#, c-format
+msgid "\t -l: list available CPU sleep monitors (for use with -m)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:77
+#, c-format
+msgid "\t -m: show specific CPU sleep monitors only (in same order)\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:79
+#, c-format
+msgid ""
+"only one of: -t, -l, -m are allowed\n"
+"If none of them is passed,"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:80
+#, c-format
+msgid " all supported monitors are shown\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:197
+#, c-format
+msgid "Monitor %s, Counter %s has no count function. Implementation error\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:207
+#, c-format
+msgid " *is offline\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:236
+#, c-format
+msgid "%s: max monitor name length (%d) exceeded\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:250
+#, c-format
+msgid "No matching monitor found in %s, try -l option\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:266
+#, c-format
+msgid "Monitor \"%s\" (%d states) - Might overflow after %u s\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:319
+#, c-format
+msgid "%s took %.5f seconds and exited with status %d\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:406
+#, c-format
+msgid "Cannot read number of available processors\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:417
+#, c-format
+msgid "Available monitor %s needs root access\n"
+msgstr ""
+
+#: utils/idle_monitor/cpupower-monitor.c:428
+#, c-format
+msgid "No HW Cstate monitors found\n"
+msgstr ""
+
+#: utils/cpupower.c:78
+#, c-format
+msgid "cpupower [ -c cpulist ] subcommand [ARGS]\n"
+msgstr ""
+
+#: utils/cpupower.c:79
+#, c-format
+msgid "cpupower --version\n"
+msgstr ""
+
+#: utils/cpupower.c:80
+#, c-format
+msgid "Supported subcommands are:\n"
+msgstr ""
+
+#: utils/cpupower.c:83
+#, c-format
+msgid ""
+"\n"
+"Some subcommands can make use of the -c cpulist option.\n"
+msgstr ""
+
+#: utils/cpupower.c:84
+#, c-format
+msgid "Look at the general cpupower manpage how to use it\n"
+msgstr ""
+
+#: utils/cpupower.c:85
+#, c-format
+msgid "and read up the subcommand's manpage whether it is supported.\n"
+msgstr ""
+
+#: utils/cpupower.c:86
+#, c-format
+msgid ""
+"\n"
+"Use cpupower help subcommand for getting help for above subcommands.\n"
+msgstr ""
+
+#: utils/cpupower.c:91
+#, c-format
+msgid "Report errors and bugs to %s, please.\n"
+msgstr "Reporte erros e bugs para %s, por favor.\n"
+
+#: utils/cpupower.c:114
+#, c-format
+msgid "Error parsing cpu list\n"
+msgstr ""
+
+#: utils/cpupower.c:172
+#, c-format
+msgid "Subcommand %s needs root privileges\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:31
+#, c-format
+msgid "Couldn't count the number of CPUs (%s: %s), assuming 1\n"
+msgstr "Não foi possível contar o número de CPUs (%s: %s), assumindo 1\n"
+
+#: utils/cpufreq-info.c:63
+#, c-format
+msgid ""
+" minimum CPU frequency - maximum CPU frequency - governor\n"
+msgstr ""
+" frequência mínina do CPU - frequência máxima do CPU - "
+"regulador\n"
+
+#: utils/cpufreq-info.c:151
+#, c-format
+msgid "Error while evaluating Boost Capabilities on CPU %d -- are you root?\n"
+msgstr ""
+
+#. P state changes via MSR are identified via cpuid 80000007
+#. on Intel and AMD, but we assume boost capable machines can do that
+#. if (cpuid_eax(0x80000000) >= 0x80000007
+#. && (cpuid_edx(0x80000007) & (1 << 7)))
+#.
+#: utils/cpufreq-info.c:161
+#, c-format
+msgid " boost state support: \n"
+msgstr ""
+
+#: utils/cpufreq-info.c:163
+#, c-format
+msgid " Supported: %s\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:163 utils/cpufreq-info.c:164
+msgid "yes"
+msgstr ""
+
+#: utils/cpufreq-info.c:163 utils/cpufreq-info.c:164
+msgid "no"
+msgstr ""
+
+#: utils/cpufreq-info.c:164
+#, fuzzy, c-format
+msgid " Active: %s\n"
+msgstr " driver: %s\n"
+
+#: utils/cpufreq-info.c:177
+#, c-format
+msgid " Boost States: %d\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:178
+#, c-format
+msgid " Total States: %d\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:181
+#, c-format
+msgid " Pstate-Pb%d: %luMHz (boost state)\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:184
+#, c-format
+msgid " Pstate-P%d: %luMHz\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:211
+#, c-format
+msgid " no or unknown cpufreq driver is active on this CPU\n"
+msgstr " nenhum ou driver do cpufreq deconhecido está ativo nesse CPU\n"
+
+#: utils/cpufreq-info.c:213
+#, c-format
+msgid " driver: %s\n"
+msgstr " driver: %s\n"
+
+#: utils/cpufreq-info.c:219
+#, c-format
+msgid " CPUs which run at the same hardware frequency: "
+msgstr " CPUs que rodam na mesma frequência de hardware: "
+
+#: utils/cpufreq-info.c:230
+#, c-format
+msgid " CPUs which need to have their frequency coordinated by software: "
+msgstr " CPUs que precisam ter suas frequências coordenadas por software: "
+
+#: utils/cpufreq-info.c:241
+#, c-format
+msgid " maximum transition latency: "
+msgstr " maior latência de transição: "
+
+#: utils/cpufreq-info.c:247
+#, c-format
+msgid " hardware limits: "
+msgstr " limites do hardware: "
+
+#: utils/cpufreq-info.c:256
+#, c-format
+msgid " available frequency steps: "
+msgstr " níveis de frequência disponíveis: "
+
+#: utils/cpufreq-info.c:269
+#, c-format
+msgid " available cpufreq governors: "
+msgstr " reguladores do cpufreq disponíveis: "
+
+#: utils/cpufreq-info.c:280
+#, c-format
+msgid " current policy: frequency should be within "
+msgstr " política de frequência atual deve estar entre "
+
+#: utils/cpufreq-info.c:282
+#, c-format
+msgid " and "
+msgstr " e "
+
+#: utils/cpufreq-info.c:286
+#, c-format
+msgid ""
+"The governor \"%s\" may decide which speed to use\n"
+" within this range.\n"
+msgstr ""
+"O regulador \"%s\" deve decidir qual velocidade usar\n"
+" dentro desse limite.\n"
+
+#: utils/cpufreq-info.c:293
+#, c-format
+msgid " current CPU frequency is "
+msgstr " frequência atual do CPU é "
+
+#: utils/cpufreq-info.c:296
+#, c-format
+msgid " (asserted by call to hardware)"
+msgstr " (declarado por chamada ao hardware)"
+
+#: utils/cpufreq-info.c:304
+#, c-format
+msgid " cpufreq stats: "
+msgstr " status do cpufreq: "
+
+#: utils/cpufreq-info.c:472
+#, fuzzy, c-format
+msgid "Usage: cpupower freqinfo [options]\n"
+msgstr "Uso: cpufreq-info [opções]\n"
+
+#: utils/cpufreq-info.c:473 utils/cpufreq-set.c:26 utils/cpupower-set.c:23
+#: utils/cpupower-info.c:22 utils/cpuidle-info.c:148
+#, c-format
+msgid "Options:\n"
+msgstr "Opções:\n"
+
+#: utils/cpufreq-info.c:474
+#, fuzzy, c-format
+msgid " -e, --debug Prints out debug information [default]\n"
+msgstr " -e, --debug Mostra informação de debug\n"
+
+#: utils/cpufreq-info.c:475
+#, c-format
+msgid ""
+" -f, --freq Get frequency the CPU currently runs at, according\n"
+" to the cpufreq core *\n"
+msgstr ""
+" -f, --freq Obtem a frequência na qual o CPU roda no momento, de "
+"acordo\n"
+" com o núcleo do cpufreq *\n"
+
+#: utils/cpufreq-info.c:477
+#, c-format
+msgid ""
+" -w, --hwfreq Get frequency the CPU currently runs at, by reading\n"
+" it from hardware (only available to root) *\n"
+msgstr ""
+" -w, --hwfreq Obtem a frequência na qual o CPU está operando no "
+"momento,\n"
+" através de leitura no hardware (disponível somente "
+"para root) *\n"
+
+#: utils/cpufreq-info.c:479
+#, c-format
+msgid ""
+" -l, --hwlimits Determine the minimum and maximum CPU frequency "
+"allowed *\n"
+msgstr ""
+" -l, --hwlimits Determina a frequência mínima e máxima do CPU "
+"permitida *\n"
+
+#: utils/cpufreq-info.c:480
+#, c-format
+msgid " -d, --driver Determines the used cpufreq kernel driver *\n"
+msgstr ""
+" -d, --driver Determina o driver do kernel do cpufreq usado *\n"
+
+#: utils/cpufreq-info.c:481
+#, c-format
+msgid " -p, --policy Gets the currently used cpufreq policy *\n"
+msgstr ""
+"--p, --policy Obtem a política do cpufreq em uso no momento *\n"
+
+#: utils/cpufreq-info.c:482
+#, c-format
+msgid " -g, --governors Determines available cpufreq governors *\n"
+msgstr ""
+" -g, --governors Determina reguladores do cpufreq disponíveis *\n"
+
+#: utils/cpufreq-info.c:483
+#, c-format
+msgid ""
+" -r, --related-cpus Determines which CPUs run at the same hardware "
+"frequency *\n"
+msgstr ""
+" -r, --related-cpus Determina quais CPUs rodam na mesma frequência de "
+"hardware *\n"
+
+#: utils/cpufreq-info.c:484
+#, c-format
+msgid ""
+" -a, --affected-cpus Determines which CPUs need to have their frequency\n"
+" coordinated by software *\n"
+msgstr ""
+" -a, --affected-cpus Determina quais CPUs precisam ter suas frequências\n"
+" coordenadas por software *\n"
+
+#: utils/cpufreq-info.c:486
+#, c-format
+msgid " -s, --stats Shows cpufreq statistics if available\n"
+msgstr " -s, --stats Mostra estatísticas do cpufreq se disponíveis\n"
+
+#: utils/cpufreq-info.c:487
+#, c-format
+msgid ""
+" -y, --latency Determines the maximum latency on CPU frequency "
+"changes *\n"
+msgstr ""
+" -y, --latency Determina a latência máxima nas trocas de frequência "
+"do CPU *\n"
+
+#: utils/cpufreq-info.c:488
+#, c-format
+msgid " -b, --boost Checks for turbo or boost modes *\n"
+msgstr ""
+
+#: utils/cpufreq-info.c:489
+#, c-format
+msgid ""
+" -o, --proc Prints out information like provided by the /proc/"
+"cpufreq\n"
+" interface in 2.4. and early 2.6. kernels\n"
+msgstr ""
+" -o, --proc Mostra informação do tipo provida pela interface /"
+"proc/cpufreq\n"
+" em kernels 2.4. e mais recentes 2.6\n"
+
+#: utils/cpufreq-info.c:491
+#, c-format
+msgid ""
+" -m, --human human-readable output for the -f, -w, -s and -y "
+"parameters\n"
+msgstr ""
+" -m, --human saída legível para humanos para os parâmetros -f, -w, "
+"-s e -y\n"
+
+#: utils/cpufreq-info.c:492 utils/cpuidle-info.c:152
+#, c-format
+msgid " -h, --help Prints out this screen\n"
+msgstr " -h, --help Imprime essa tela\n"
+
+#: utils/cpufreq-info.c:495
+#, c-format
+msgid ""
+"If no argument or only the -c, --cpu parameter is given, debug output about\n"
+"cpufreq is printed which is useful e.g. for reporting bugs.\n"
+msgstr ""
+"Se nenhum argumento ou somente o parâmetro -c, --cpu é dado, informação de "
+"debug sobre\n"
+"o cpufreq é mostrada, o que é útil por exemplo para reportar bugs.\n"
+
+#: utils/cpufreq-info.c:497
+#, c-format
+msgid ""
+"For the arguments marked with *, omitting the -c or --cpu argument is\n"
+"equivalent to setting it to zero\n"
+msgstr ""
+"Para os argumentos marcados com *, omitir o argumento -c ou --cpu é\n"
+"equivalente a setá-lo como zero\n"
+
+#: utils/cpufreq-info.c:580
+#, c-format
+msgid ""
+"The argument passed to this tool can't be combined with passing a --cpu "
+"argument\n"
+msgstr ""
+"O argumento usado pra essa ferramenta não pode ser combinado com um "
+"argumento --cpu\n"
+
+#: utils/cpufreq-info.c:596
+#, c-format
+msgid ""
+"You can't specify more than one --cpu parameter and/or\n"
+"more than one output-specific argument\n"
+msgstr ""
+"Você não pode especificar mais do que um parâmetro --cpu e/ou\n"
+"mais do que um argumento de saída específico\n"
+
+#: utils/cpufreq-info.c:600 utils/cpufreq-set.c:82 utils/cpupower-set.c:42
+#: utils/cpupower-info.c:42 utils/cpuidle-info.c:213
+#, c-format
+msgid "invalid or unknown argument\n"
+msgstr "argumento inválido ou desconhecido\n"
+
+#: utils/cpufreq-info.c:617
+#, c-format
+msgid "couldn't analyze CPU %d as it doesn't seem to be present\n"
+msgstr ""
+"não foi possível analisar o CPU % já que o mesmo parece não estar presente\n"
+
+#: utils/cpufreq-info.c:620 utils/cpupower-info.c:142
+#, c-format
+msgid "analyzing CPU %d:\n"
+msgstr "analisando o CPU %d:\n"
+
+#: utils/cpufreq-set.c:25
+#, fuzzy, c-format
+msgid "Usage: cpupower frequency-set [options]\n"
+msgstr "Uso: cpufreq-set [opções]\n"
+
+#: utils/cpufreq-set.c:27
+#, c-format
+msgid ""
+" -d FREQ, --min FREQ new minimum CPU frequency the governor may "
+"select\n"
+msgstr ""
+" -d FREQ, --min FREQ nova frequência mínima do CPU que o regulador "
+"deve selecionar\n"
+
+#: utils/cpufreq-set.c:28
+#, c-format
+msgid ""
+" -u FREQ, --max FREQ new maximum CPU frequency the governor may "
+"select\n"
+msgstr ""
+" -u FREQ, --max FREQ nova frequência máxima do CPU que o regulador "
+"deve escolher\n"
+
+#: utils/cpufreq-set.c:29
+#, c-format
+msgid " -g GOV, --governor GOV new cpufreq governor\n"
+msgstr " -g GOV, --governor GOV novo regulador do cpufreq\n"
+
+#: utils/cpufreq-set.c:30
+#, c-format
+msgid ""
+" -f FREQ, --freq FREQ specific frequency to be set. Requires userspace\n"
+" governor to be available and loaded\n"
+msgstr ""
+" -f FREQ, --freq FREQ frequência específica para ser setada. Necessita "
+"que o regulador em\n"
+" nível de usuário esteja disponível e carregado\n"
+
+#: utils/cpufreq-set.c:32
+#, c-format
+msgid " -r, --related Switches all hardware-related CPUs\n"
+msgstr ""
+" -r, --related Modifica todos os CPUs relacionados ao hardware\n"
+
+#: utils/cpufreq-set.c:33 utils/cpupower-set.c:28 utils/cpupower-info.c:27
+#, c-format
+msgid " -h, --help Prints out this screen\n"
+msgstr " -h, --help Mostra essa tela\n"
+
+#: utils/cpufreq-set.c:35
+#, fuzzy, c-format
+msgid ""
+"Notes:\n"
+"1. Omitting the -c or --cpu argument is equivalent to setting it to \"all\"\n"
+msgstr ""
+"Para os argumentos marcados com *, omitir o argumento -c ou --cpu é\n"
+"equivalente a setá-lo como zero\n"
+
+#: utils/cpufreq-set.c:37
+#, fuzzy, c-format
+msgid ""
+"2. The -f FREQ, --freq FREQ parameter cannot be combined with any other "
+"parameter\n"
+" except the -c CPU, --cpu CPU parameter\n"
+"3. FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz\n"
+" by postfixing the value with the wanted unit name, without any space\n"
+" (FREQuency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n"
+msgstr ""
+"Notas:\n"
+"1. Omitir o argumento -c or --cpu é equivalente a setá-lo como zero\n"
+"2. O parâmetro -f FREQ, --freq FREQ não pode ser combinado com qualquer "
+"outro parâmetro\n"
+" exceto com o parâmetro -c CPU, --cpu CPU\n"
+"3. FREQuências podem ser usadas em Hz, kHz (padrão), MHz, GHz, o THz\n"
+" colocando o nome desejado da unidade após o valor, sem qualquer espaço\n"
+" (FREQuência em kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n"
+
+#: utils/cpufreq-set.c:57
+#, c-format
+msgid ""
+"Error setting new values. Common errors:\n"
+"- Do you have proper administration rights? (super-user?)\n"
+"- Is the governor you requested available and modprobed?\n"
+"- Trying to set an invalid policy?\n"
+"- Trying to set a specific frequency, but userspace governor is not "
+"available,\n"
+" for example because of hardware which cannot be set to a specific "
+"frequency\n"
+" or because the userspace governor isn't loaded?\n"
+msgstr ""
+"Erro ao setar novos valores. Erros comuns:\n"
+"- Você tem direitos administrativos necessários? (super-usuário?)\n"
+"- O regulador que você requesitou está disponível e foi \"modprobed\"?\n"
+"- Tentando setar uma política inválida?\n"
+"- Tentando setar uma frequência específica, mas o regulador em nível de "
+"usuário não está disponível,\n"
+" por exemplo devido ao hardware que não pode ser setado pra uma frequência "
+"específica\n"
+" ou porque o regulador em nível de usuário não foi carregado?\n"
+
+#: utils/cpufreq-set.c:170
+#, c-format
+msgid "wrong, unknown or unhandled CPU?\n"
+msgstr "CPU errado, desconhecido ou inesperado?\n"
+
+#: utils/cpufreq-set.c:302
+#, c-format
+msgid ""
+"the -f/--freq parameter cannot be combined with -d/--min, -u/--max or\n"
+"-g/--governor parameters\n"
+msgstr ""
+"o parâmetro -f/--freq não pode ser combinado com os parâmetros -d/--min, -"
+"u/--max ou\n"
+"-g/--governor\n"
+
+#: utils/cpufreq-set.c:308
+#, c-format
+msgid ""
+"At least one parameter out of -f/--freq, -d/--min, -u/--max, and\n"
+"-g/--governor must be passed\n"
+msgstr ""
+"Pelo menos um parâmetro entre -f/--freq, -d/--min, -u/--max, e\n"
+"-g/--governor deve ser usado\n"
+
+#: utils/cpufreq-set.c:347
+#, c-format
+msgid "Setting cpu: %d\n"
+msgstr ""
+
+#: utils/cpupower-set.c:22
+#, c-format
+msgid "Usage: cpupower set [ -b val ] [ -m val ] [ -s val ]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:24
+#, c-format
+msgid ""
+" -b, --perf-bias [VAL] Sets CPU's power vs performance policy on some\n"
+" Intel models [0-15], see manpage for details\n"
+msgstr ""
+
+#: utils/cpupower-set.c:26
+#, c-format
+msgid ""
+" -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
+msgstr ""
+
+#: utils/cpupower-set.c:27
+#, c-format
+msgid ""
+" -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
+"policy.\n"
+msgstr ""
+
+#: utils/cpupower-set.c:80
+#, c-format
+msgid "--perf-bias param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:91
+#, c-format
+msgid "--sched-mc param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:102
+#, c-format
+msgid "--sched-smt param out of range [0-%d]\n"
+msgstr ""
+
+#: utils/cpupower-set.c:121
+#, c-format
+msgid "Error setting sched-mc %s\n"
+msgstr ""
+
+#: utils/cpupower-set.c:127
+#, c-format
+msgid "Error setting sched-smt %s\n"
+msgstr ""
+
+#: utils/cpupower-set.c:146
+#, c-format
+msgid "Error setting perf-bias value on CPU %d\n"
+msgstr ""
+
+#: utils/cpupower-info.c:21
+#, c-format
+msgid "Usage: cpupower info [ -b ] [ -m ] [ -s ]\n"
+msgstr ""
+
+#: utils/cpupower-info.c:23
+#, c-format
+msgid ""
+" -b, --perf-bias Gets CPU's power vs performance policy on some\n"
+" Intel models [0-15], see manpage for details\n"
+msgstr ""
+
+#: utils/cpupower-info.c:25
+#, fuzzy, c-format
+msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
+msgstr ""
+"--p, --policy Obtem a política do cpufreq em uso no momento *\n"
+
+#: utils/cpupower-info.c:26
+#, c-format
+msgid ""
+" -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
+msgstr ""
+
+#: utils/cpupower-info.c:28
+#, c-format
+msgid ""
+"\n"
+"Passing no option will show all info, by default only on core 0\n"
+msgstr ""
+
+#: utils/cpupower-info.c:102
+#, c-format
+msgid "System's multi core scheduler setting: "
+msgstr ""
+
+#. if sysfs file is missing it's: errno == ENOENT
+#: utils/cpupower-info.c:105 utils/cpupower-info.c:114
+#, c-format
+msgid "not supported\n"
+msgstr ""
+
+#: utils/cpupower-info.c:111
+#, c-format
+msgid "System's thread sibling scheduler setting: "
+msgstr ""
+
+#: utils/cpupower-info.c:126
+#, c-format
+msgid "Intel's performance bias setting needs root privileges\n"
+msgstr ""
+
+#: utils/cpupower-info.c:128
+#, c-format
+msgid "System does not support Intel's performance bias setting\n"
+msgstr ""
+
+#: utils/cpupower-info.c:147
+#, c-format
+msgid "Could not read perf-bias value\n"
+msgstr ""
+
+#: utils/cpupower-info.c:150
+#, c-format
+msgid "perf-bias: %d\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:28
+#, fuzzy, c-format
+msgid "Analyzing CPU %d:\n"
+msgstr "analisando o CPU %d:\n"
+
+#: utils/cpuidle-info.c:32
+#, c-format
+msgid "CPU %u: No idle states\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:36
+#, c-format
+msgid "CPU %u: Can't read idle state info\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:41
+#, c-format
+msgid "Could not determine max idle state %u\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:46
+#, c-format
+msgid "Number of idle states: %d\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:48
+#, fuzzy, c-format
+msgid "Available idle states:"
+msgstr " níveis de frequência disponíveis: "
+
+#: utils/cpuidle-info.c:71
+#, c-format
+msgid "Flags/Description: %s\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:74
+#, c-format
+msgid "Latency: %lu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:76
+#, c-format
+msgid "Usage: %lu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:78
+#, c-format
+msgid "Duration: %llu\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:90
+#, c-format
+msgid "Could not determine cpuidle driver\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:94
+#, fuzzy, c-format
+msgid "CPUidle driver: %s\n"
+msgstr " driver: %s\n"
+
+#: utils/cpuidle-info.c:99
+#, c-format
+msgid "Could not determine cpuidle governor\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:103
+#, c-format
+msgid "CPUidle governor: %s\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:122
+#, c-format
+msgid "CPU %u: Can't read C-state info\n"
+msgstr ""
+
+#. printf("Cstates: %d\n", cstates);
+#: utils/cpuidle-info.c:127
+#, c-format
+msgid "active state: C0\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:128
+#, c-format
+msgid "max_cstate: C%u\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:129
+#, fuzzy, c-format
+msgid "maximum allowed latency: %lu usec\n"
+msgstr " maior latência de transição: "
+
+#: utils/cpuidle-info.c:130
+#, c-format
+msgid "states:\t\n"
+msgstr ""
+
+#: utils/cpuidle-info.c:132
+#, c-format
+msgid " C%d: type[C%d] "
+msgstr ""
+
+#: utils/cpuidle-info.c:134
+#, c-format
+msgid "promotion[--] demotion[--] "
+msgstr ""
+
+#: utils/cpuidle-info.c:135
+#, c-format
+msgid "latency[%03lu] "
+msgstr ""
+
+#: utils/cpuidle-info.c:137
+#, c-format
+msgid "usage[%08lu] "
+msgstr ""
+
+#: utils/cpuidle-info.c:139
+#, c-format
+msgid "duration[%020Lu] \n"
+msgstr ""
+
+#: utils/cpuidle-info.c:147
+#, fuzzy, c-format
+msgid "Usage: cpupower idleinfo [options]\n"
+msgstr "Uso: cpufreq-info [opções]\n"
+
+#: utils/cpuidle-info.c:149
+#, fuzzy, c-format
+msgid " -s, --silent Only show general C-state information\n"
+msgstr " -e, --debug Mostra informação de debug\n"
+
+#: utils/cpuidle-info.c:150
+#, fuzzy, c-format
+msgid ""
+" -o, --proc Prints out information like provided by the /proc/"
+"acpi/processor/*/power\n"
+" interface in older kernels\n"
+msgstr ""
+" -o, --proc Mostra informação do tipo provida pela interface /"
+"proc/cpufreq\n"
+" em kernels 2.4. e mais recentes 2.6\n"
+
+#: utils/cpuidle-info.c:209
+#, fuzzy, c-format
+msgid "You can't specify more than one output-specific argument\n"
+msgstr ""
+"Você não pode especificar mais do que um parâmetro --cpu e/ou\n"
+"mais do que um argumento de saída específico\n"
+
+#~ msgid ""
+#~ " -c CPU, --cpu CPU CPU number which information shall be determined "
+#~ "about\n"
+#~ msgstr ""
+#~ " -c CPU, --cpu CPU número do CPU sobre o qual as inforções devem ser "
+#~ "determinadas\n"
+
+#~ msgid ""
+#~ " -c CPU, --cpu CPU number of CPU where cpufreq settings shall be "
+#~ "modified\n"
+#~ msgstr ""
+#~ " -c CPU, --cpu CPU número do CPU onde as configurações do cpufreq "
+#~ "vão ser modificadas\n"
diff --git a/tools/power/cpupower/utils/builtin.h b/tools/power/cpupower/utils/builtin.h
new file mode 100644
index 00000000000..c870ffba521
--- /dev/null
+++ b/tools/power/cpupower/utils/builtin.h
@@ -0,0 +1,18 @@
+#ifndef BUILTIN_H
+#define BUILTIN_H
+
+extern int cmd_set(int argc, const char **argv);
+extern int cmd_info(int argc, const char **argv);
+extern int cmd_freq_set(int argc, const char **argv);
+extern int cmd_freq_info(int argc, const char **argv);
+extern int cmd_idle_info(int argc, const char **argv);
+extern int cmd_monitor(int argc, const char **argv);
+
+extern void set_help(void);
+extern void info_help(void);
+extern void freq_set_help(void);
+extern void freq_info_help(void);
+extern void idle_info_help(void);
+extern void monitor_help(void);
+
+#endif
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
new file mode 100644
index 00000000000..5a1d25f056b
--- /dev/null
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -0,0 +1,708 @@
+/*
+ * (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ */
+
+
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <getopt.h>
+
+#include "cpufreq.h"
+#include "helpers/helpers.h"
+#include "helpers/bitmask.h"
+
+#define LINE_LEN 10
+
+static unsigned int count_cpus(void)
+{
+ FILE *fp;
+ char value[LINE_LEN];
+ unsigned int ret = 0;
+ unsigned int cpunr = 0;
+
+ fp = fopen("/proc/stat", "r");
+ if (!fp) {
+ printf(_("Couldn't count the number of CPUs (%s: %s), assuming 1\n"), "/proc/stat", strerror(errno));
+ return 1;
+ }
+
+ while (!feof(fp)) {
+ if (!fgets(value, LINE_LEN, fp))
+ continue;
+ value[LINE_LEN - 1] = '\0';
+ if (strlen(value) < (LINE_LEN - 2))
+ continue;
+ if (strstr(value, "cpu "))
+ continue;
+ if (sscanf(value, "cpu%d ", &cpunr) != 1)
+ continue;
+ if (cpunr > ret)
+ ret = cpunr;
+ }
+ fclose(fp);
+
+ /* cpu count starts from 0, on error return 1 (UP) */
+ return ret + 1;
+}
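+
+/*
+ * A minimal sketch of the /proc/stat lines count_cpus() looks at, assuming
+ * a made-up 4-way machine (the counter columns are irrelevant here):
+ *
+ *   cpu  2255 34 2290 22625563 ...   <- aggregate line, skipped via strstr()
+ *   cpu0 1132 34 1441 11311718 ...   <- matches "cpu%d", cpunr = 0
+ *   cpu3 1123 0  849 11313845 ...    <- highest cpunr seen is 3
+ *
+ * The highest per-CPU number plus one (here 4) is returned.
+ */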
+
+
+static void proc_cpufreq_output(void)
+{
+ unsigned int cpu, nr_cpus;
+ struct cpufreq_policy *policy;
+ unsigned int min_pctg = 0;
+ unsigned int max_pctg = 0;
+ unsigned long min, max;
+
+ printf(_(" minimum CPU frequency - maximum CPU frequency - governor\n"));
+
+ nr_cpus = count_cpus();
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ policy = cpufreq_get_policy(cpu);
+ if (!policy)
+ continue;
+
+ if (cpufreq_get_hardware_limits(cpu, &min, &max)) {
+ max = 0;
+ } else {
+ min_pctg = (policy->min * 100) / max;
+ max_pctg = (policy->max * 100) / max;
+ }
+ printf("CPU%3d %9lu kHz (%3d %%) - %9lu kHz (%3d %%) - %s\n",
+ cpu , policy->min, max ? min_pctg : 0, policy->max,
+ max ? max_pctg : 0, policy->governor);
+
+ cpufreq_put_policy(policy);
+ }
+}
+
+static void print_speed(unsigned long speed)
+{
+ unsigned long tmp;
+
+ if (speed > 1000000) {
+ tmp = speed % 10000;
+ if (tmp >= 5000)
+ speed += 10000;
+ printf("%u.%02u GHz", ((unsigned int) speed/1000000),
+ ((unsigned int) (speed%1000000)/10000));
+ } else if (speed > 100000) {
+ tmp = speed % 1000;
+ if (tmp >= 500)
+ speed += 1000;
+ printf("%u MHz", ((unsigned int) speed / 1000));
+ } else if (speed > 1000) {
+ tmp = speed % 100;
+ if (tmp >= 50)
+ speed += 100;
+ printf("%u.%01u MHz", ((unsigned int) speed/1000),
+ ((unsigned int) (speed%1000)/100));
+ } else
+ printf("%lu kHz", speed);
+
+ return;
+}
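+
+/*
+ * Worked examples for print_speed() above (input is a frequency in kHz, as
+ * reported by the cpufreq interfaces; values picked for illustration only):
+ *
+ *   2394000 kHz -> "2.39 GHz"   (tmp = 4000 < 5000, not rounded up)
+ *   2396000 kHz -> "2.40 GHz"   (tmp = 6000 >= 5000, rounded up)
+ *    800000 kHz -> "800 MHz"
+ *      1800 kHz -> "1.8 MHz"
+ *       900 kHz -> "900 kHz"
+ */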
+
+static void print_duration(unsigned long duration)
+{
+ unsigned long tmp;
+
+ if (duration > 1000000) {
+ tmp = duration % 10000;
+ if (tmp >= 5000)
+ duration += 10000;
+ printf("%u.%02u ms", ((unsigned int) duration/1000000),
+ ((unsigned int) (duration%1000000)/10000));
+ } else if (duration > 100000) {
+ tmp = duration % 1000;
+ if (tmp >= 500)
+ duration += 1000;
+ printf("%u us", ((unsigned int) duration / 1000));
+ } else if (duration > 1000) {
+ tmp = duration % 100;
+ if (tmp >= 50)
+ duration += 100;
+ printf("%u.%01u us", ((unsigned int) duration/1000),
+ ((unsigned int) (duration%1000)/100));
+ } else
+ printf("%lu ns", duration);
+
+ return;
+}
+
+/* --boost / -b */
+
+static int get_boost_mode(unsigned int cpu)
+{
+ int support, active, b_states = 0, ret, pstate_no, i;
+ /* ToDo: Make this more global */
+ unsigned long pstates[MAX_HW_PSTATES] = {0,};
+
+ if (cpupower_cpu_info.vendor != X86_VENDOR_AMD &&
+ cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
+ return 0;
+
+ ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states);
+ if (ret) {
+ printf(_("Error while evaluating Boost Capabilities"
+ " on CPU %d -- are you root?\n"), cpu);
+ return ret;
+ }
+	/* P-state changes via MSR are identified via cpuid 80000007
+	   on Intel and AMD; boost capable machines are assumed to support them:
+ if (cpuid_eax(0x80000000) >= 0x80000007
+ && (cpuid_edx(0x80000007) & (1 << 7)))
+ */
+
+ printf(_(" boost state support:\n"));
+
+ printf(_(" Supported: %s\n"), support ? _("yes") : _("no"));
+ printf(_(" Active: %s\n"), active ? _("yes") : _("no"));
+
+ if (cpupower_cpu_info.vendor == X86_VENDOR_AMD &&
+ cpupower_cpu_info.family >= 0x10) {
+ ret = decode_pstates(cpu, cpupower_cpu_info.family, b_states,
+ pstates, &pstate_no);
+ if (ret)
+ return ret;
+
+ printf(_(" Boost States: %d\n"), b_states);
+ printf(_(" Total States: %d\n"), pstate_no);
+ for (i = 0; i < pstate_no; i++) {
+ if (i < b_states)
+ printf(_(" Pstate-Pb%d: %luMHz (boost state)"
+ "\n"), i, pstates[i]);
+ else
+ printf(_(" Pstate-P%d: %luMHz\n"),
+ i - b_states, pstates[i]);
+ }
+ } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_HAS_TURBO_RATIO) {
+ double bclk;
+ unsigned long long intel_turbo_ratio = 0;
+ unsigned int ratio;
+
+ /* Any way to autodetect this ? */
+ if (cpupower_cpu_info.caps & CPUPOWER_CAP_IS_SNB)
+ bclk = 100.00;
+ else
+ bclk = 133.33;
+ intel_turbo_ratio = msr_intel_get_turbo_ratio(cpu);
+ dprint (" Ratio: 0x%llx - bclk: %f\n",
+ intel_turbo_ratio, bclk);
+
+ ratio = (intel_turbo_ratio >> 24) & 0xFF;
+ if (ratio)
+ printf(_(" %.0f MHz max turbo 4 active cores\n"),
+ ratio * bclk);
+
+ ratio = (intel_turbo_ratio >> 16) & 0xFF;
+ if (ratio)
+ printf(_(" %.0f MHz max turbo 3 active cores\n"),
+ ratio * bclk);
+
+ ratio = (intel_turbo_ratio >> 8) & 0xFF;
+ if (ratio)
+ printf(_(" %.0f MHz max turbo 2 active cores\n"),
+ ratio * bclk);
+
+ ratio = (intel_turbo_ratio >> 0) & 0xFF;
+ if (ratio)
+ printf(_(" %.0f MHz max turbo 1 active cores\n"),
+ ratio * bclk);
+ }
+ return 0;
+}
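+
+/*
+ * Illustrative decode of the Intel turbo ratio path above, assuming a
+ * Sandy Bridge part (bclk = 100 MHz) and a made-up MSR value 0x23242526:
+ *
+ *   bits  0- 7: 0x26 = 38 -> "3800 MHz max turbo 1 active cores"
+ *   bits  8-15: 0x25 = 37 -> "3700 MHz max turbo 2 active cores"
+ *   bits 16-23: 0x24 = 36 -> "3600 MHz max turbo 3 active cores"
+ *   bits 24-31: 0x23 = 35 -> "3500 MHz max turbo 4 active cores"
+ *
+ * Only the low 32 bits are decoded here, matching the four shifts used in
+ * get_boost_mode().
+ */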
+
+static void debug_output_one(unsigned int cpu)
+{
+ char *driver;
+ struct cpufreq_affected_cpus *cpus;
+ struct cpufreq_available_frequencies *freqs;
+ unsigned long min, max, freq_kernel, freq_hardware;
+ unsigned long total_trans, latency;
+ unsigned long long total_time;
+ struct cpufreq_policy *policy;
+ struct cpufreq_available_governors *governors;
+ struct cpufreq_stats *stats;
+
+ if (cpufreq_cpu_exists(cpu))
+ return;
+
+ freq_kernel = cpufreq_get_freq_kernel(cpu);
+ freq_hardware = cpufreq_get_freq_hardware(cpu);
+
+ driver = cpufreq_get_driver(cpu);
+ if (!driver) {
+ printf(_(" no or unknown cpufreq driver is active on this CPU\n"));
+ } else {
+ printf(_(" driver: %s\n"), driver);
+ cpufreq_put_driver(driver);
+ }
+
+ cpus = cpufreq_get_related_cpus(cpu);
+ if (cpus) {
+ printf(_(" CPUs which run at the same hardware frequency: "));
+ while (cpus->next) {
+ printf("%d ", cpus->cpu);
+ cpus = cpus->next;
+ }
+ printf("%d\n", cpus->cpu);
+ cpufreq_put_related_cpus(cpus);
+ }
+
+ cpus = cpufreq_get_affected_cpus(cpu);
+ if (cpus) {
+ printf(_(" CPUs which need to have their frequency coordinated by software: "));
+ while (cpus->next) {
+ printf("%d ", cpus->cpu);
+ cpus = cpus->next;
+ }
+ printf("%d\n", cpus->cpu);
+ cpufreq_put_affected_cpus(cpus);
+ }
+
+ latency = cpufreq_get_transition_latency(cpu);
+ if (latency) {
+ printf(_(" maximum transition latency: "));
+ print_duration(latency);
+ printf(".\n");
+ }
+
+ if (!(cpufreq_get_hardware_limits(cpu, &min, &max))) {
+ printf(_(" hardware limits: "));
+ print_speed(min);
+ printf(" - ");
+ print_speed(max);
+ printf("\n");
+ }
+
+ freqs = cpufreq_get_available_frequencies(cpu);
+ if (freqs) {
+ printf(_(" available frequency steps: "));
+ while (freqs->next) {
+ print_speed(freqs->frequency);
+ printf(", ");
+ freqs = freqs->next;
+ }
+ print_speed(freqs->frequency);
+ printf("\n");
+ cpufreq_put_available_frequencies(freqs);
+ }
+
+ governors = cpufreq_get_available_governors(cpu);
+ if (governors) {
+ printf(_(" available cpufreq governors: "));
+ while (governors->next) {
+ printf("%s, ", governors->governor);
+ governors = governors->next;
+ }
+ printf("%s\n", governors->governor);
+ cpufreq_put_available_governors(governors);
+ }
+
+ policy = cpufreq_get_policy(cpu);
+ if (policy) {
+ printf(_(" current policy: frequency should be within "));
+ print_speed(policy->min);
+ printf(_(" and "));
+ print_speed(policy->max);
+
+ printf(".\n ");
+ printf(_("The governor \"%s\" may"
+ " decide which speed to use\n within this range.\n"),
+ policy->governor);
+ cpufreq_put_policy(policy);
+ }
+
+ if (freq_kernel || freq_hardware) {
+ printf(_(" current CPU frequency is "));
+ if (freq_hardware) {
+ print_speed(freq_hardware);
+ printf(_(" (asserted by call to hardware)"));
+ } else
+ print_speed(freq_kernel);
+ printf(".\n");
+ }
+ stats = cpufreq_get_stats(cpu, &total_time);
+ if (stats) {
+ printf(_(" cpufreq stats: "));
+ while (stats) {
+ print_speed(stats->frequency);
+ printf(":%.2f%%", (100.0 * stats->time_in_state) / total_time);
+ stats = stats->next;
+ if (stats)
+ printf(", ");
+ }
+ cpufreq_put_stats(stats);
+ total_trans = cpufreq_get_transitions(cpu);
+ if (total_trans)
+ printf(" (%lu)\n", total_trans);
+ else
+ printf("\n");
+ }
+ get_boost_mode(cpu);
+
+}
+
+/* --freq / -f */
+
+static int get_freq_kernel(unsigned int cpu, unsigned int human)
+{
+ unsigned long freq = cpufreq_get_freq_kernel(cpu);
+ if (!freq)
+ return -EINVAL;
+ if (human) {
+ print_speed(freq);
+ printf("\n");
+ } else
+ printf("%lu\n", freq);
+ return 0;
+}
+
+
+/* --hwfreq / -w */
+
+static int get_freq_hardware(unsigned int cpu, unsigned int human)
+{
+ unsigned long freq = cpufreq_get_freq_hardware(cpu);
+ if (!freq)
+ return -EINVAL;
+ if (human) {
+ print_speed(freq);
+ printf("\n");
+ } else
+ printf("%lu\n", freq);
+ return 0;
+}
+
+/* --hwlimits / -l */
+
+static int get_hardware_limits(unsigned int cpu)
+{
+ unsigned long min, max;
+ if (cpufreq_get_hardware_limits(cpu, &min, &max))
+ return -EINVAL;
+ printf("%lu %lu\n", min, max);
+ return 0;
+}
+
+/* --driver / -d */
+
+static int get_driver(unsigned int cpu)
+{
+ char *driver = cpufreq_get_driver(cpu);
+ if (!driver)
+ return -EINVAL;
+ printf("%s\n", driver);
+ cpufreq_put_driver(driver);
+ return 0;
+}
+
+/* --policy / -p */
+
+static int get_policy(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_get_policy(cpu);
+ if (!policy)
+ return -EINVAL;
+ printf("%lu %lu %s\n", policy->min, policy->max, policy->governor);
+ cpufreq_put_policy(policy);
+ return 0;
+}
+
+/* --governors / -g */
+
+static int get_available_governors(unsigned int cpu)
+{
+ struct cpufreq_available_governors *governors =
+ cpufreq_get_available_governors(cpu);
+ if (!governors)
+ return -EINVAL;
+
+ while (governors->next) {
+ printf("%s ", governors->governor);
+ governors = governors->next;
+ }
+ printf("%s\n", governors->governor);
+ cpufreq_put_available_governors(governors);
+ return 0;
+}
+
+
+/* --affected-cpus / -a */
+
+static int get_affected_cpus(unsigned int cpu)
+{
+ struct cpufreq_affected_cpus *cpus = cpufreq_get_affected_cpus(cpu);
+ if (!cpus)
+ return -EINVAL;
+
+ while (cpus->next) {
+ printf("%d ", cpus->cpu);
+ cpus = cpus->next;
+ }
+ printf("%d\n", cpus->cpu);
+ cpufreq_put_affected_cpus(cpus);
+ return 0;
+}
+
+/* --related-cpus / -r */
+
+static int get_related_cpus(unsigned int cpu)
+{
+ struct cpufreq_affected_cpus *cpus = cpufreq_get_related_cpus(cpu);
+ if (!cpus)
+ return -EINVAL;
+
+ while (cpus->next) {
+ printf("%d ", cpus->cpu);
+ cpus = cpus->next;
+ }
+ printf("%d\n", cpus->cpu);
+ cpufreq_put_related_cpus(cpus);
+ return 0;
+}
+
+/* --stats / -s */
+
+static int get_freq_stats(unsigned int cpu, unsigned int human)
+{
+ unsigned long total_trans = cpufreq_get_transitions(cpu);
+ unsigned long long total_time;
+ struct cpufreq_stats *stats = cpufreq_get_stats(cpu, &total_time);
+ while (stats) {
+ if (human) {
+ print_speed(stats->frequency);
+ printf(":%.2f%%",
+ (100.0 * stats->time_in_state) / total_time);
+ } else
+ printf("%lu:%llu",
+ stats->frequency, stats->time_in_state);
+ stats = stats->next;
+ if (stats)
+ printf(", ");
+ }
+ cpufreq_put_stats(stats);
+ if (total_trans)
+ printf(" (%lu)\n", total_trans);
+ return 0;
+}
+
+/* --latency / -y */
+
+static int get_latency(unsigned int cpu, unsigned int human)
+{
+ unsigned long latency = cpufreq_get_transition_latency(cpu);
+ if (!latency)
+ return -EINVAL;
+
+ if (human) {
+ print_duration(latency);
+ printf("\n");
+ } else
+ printf("%lu\n", latency);
+ return 0;
+}
+
+void freq_info_help(void)
+{
+ printf(_("Usage: cpupower freqinfo [options]\n"));
+ printf(_("Options:\n"));
+ printf(_(" -e, --debug Prints out debug information [default]\n"));
+ printf(_(" -f, --freq Get frequency the CPU currently runs at, according\n"
+ " to the cpufreq core *\n"));
+ printf(_(" -w, --hwfreq Get frequency the CPU currently runs at, by reading\n"
+ " it from hardware (only available to root) *\n"));
+ printf(_(" -l, --hwlimits Determine the minimum and maximum CPU frequency allowed *\n"));
+ printf(_(" -d, --driver Determines the used cpufreq kernel driver *\n"));
+ printf(_(" -p, --policy Gets the currently used cpufreq policy *\n"));
+ printf(_(" -g, --governors Determines available cpufreq governors *\n"));
+ printf(_(" -r, --related-cpus Determines which CPUs run at the same hardware frequency *\n"));
+ printf(_(" -a, --affected-cpus Determines which CPUs need to have their frequency\n"
+ " coordinated by software *\n"));
+ printf(_(" -s, --stats Shows cpufreq statistics if available\n"));
+ printf(_(" -y, --latency Determines the maximum latency on CPU frequency changes *\n"));
+ printf(_(" -b, --boost Checks for turbo or boost modes *\n"));
+ printf(_(" -o, --proc Prints out information like provided by the /proc/cpufreq\n"
+ " interface in 2.4. and early 2.6. kernels\n"));
+ printf(_(" -m, --human human-readable output for the -f, -w, -s and -y parameters\n"));
+ printf(_(" -h, --help Prints out this screen\n"));
+
+ printf("\n");
+ printf(_("If no argument is given, full output about\n"
+ "cpufreq is printed which is useful e.g. for reporting bugs.\n\n"));
+ printf(_("By default info of CPU 0 is shown which can be overridden\n"
+ "with the cpupower --cpu main command option.\n"));
+}
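+
+/*
+ * A few illustrative invocations (CPU numbers chosen arbitrarily):
+ *
+ *   cpupower frequency-info                   # full debug output for CPU 0
+ *   cpupower -c 2 frequency-info --hwlimits   # min/max HW frequency of CPU 2
+ *   cpupower -c all frequency-info -f -m      # current frequency, human readable
+ */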
+
+static struct option info_opts[] = {
+ { .name = "debug", .has_arg = no_argument, .flag = NULL, .val = 'e'},
+ { .name = "boost", .has_arg = no_argument, .flag = NULL, .val = 'b'},
+ { .name = "freq", .has_arg = no_argument, .flag = NULL, .val = 'f'},
+ { .name = "hwfreq", .has_arg = no_argument, .flag = NULL, .val = 'w'},
+ { .name = "hwlimits", .has_arg = no_argument, .flag = NULL, .val = 'l'},
+ { .name = "driver", .has_arg = no_argument, .flag = NULL, .val = 'd'},
+ { .name = "policy", .has_arg = no_argument, .flag = NULL, .val = 'p'},
+ { .name = "governors", .has_arg = no_argument, .flag = NULL, .val = 'g'},
+ { .name = "related-cpus", .has_arg = no_argument, .flag = NULL, .val = 'r'},
+ { .name = "affected-cpus",.has_arg = no_argument, .flag = NULL, .val = 'a'},
+ { .name = "stats", .has_arg = no_argument, .flag = NULL, .val = 's'},
+ { .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'},
+ { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'},
+ { .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'},
+ { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'},
+ { },
+};
+
+int cmd_freq_info(int argc, char **argv)
+{
+ extern char *optarg;
+ extern int optind, opterr, optopt;
+ int ret = 0, cont = 1;
+ unsigned int cpu = 0;
+ unsigned int human = 0;
+ int output_param = 0;
+
+ do {
+ ret = getopt_long(argc, argv, "hoefwldpgrasmyb", info_opts, NULL);
+ switch (ret) {
+ case '?':
+ output_param = '?';
+ cont = 0;
+ break;
+ case 'h':
+ output_param = 'h';
+ cont = 0;
+ break;
+ case -1:
+ cont = 0;
+ break;
+ case 'b':
+ case 'o':
+ case 'a':
+ case 'r':
+ case 'g':
+ case 'p':
+ case 'd':
+ case 'l':
+ case 'w':
+ case 'f':
+ case 'e':
+ case 's':
+ case 'y':
+ if (output_param) {
+ output_param = -1;
+ cont = 0;
+ break;
+ }
+ output_param = ret;
+ break;
+ case 'm':
+ if (human) {
+ output_param = -1;
+ cont = 0;
+ break;
+ }
+ human = 1;
+ break;
+ default:
+ fprintf(stderr, "invalid or unknown argument\n");
+ return EXIT_FAILURE;
+ }
+ } while (cont);
+
+ switch (output_param) {
+ case 'o':
+ if (!bitmask_isallclear(cpus_chosen)) {
+ printf(_("The argument passed to this tool can't be "
+ "combined with passing a --cpu argument\n"));
+ return -EINVAL;
+ }
+ break;
+ case 0:
+ output_param = 'e';
+ }
+
+ ret = 0;
+
+ /* Default is: show output of CPU 0 only */
+ if (bitmask_isallclear(cpus_chosen))
+ bitmask_setbit(cpus_chosen, 0);
+
+ switch (output_param) {
+ case -1:
+ printf(_("You can't specify more than one --cpu parameter and/or\n"
+ "more than one output-specific argument\n"));
+ return -EINVAL;
+ case '?':
+ printf(_("invalid or unknown argument\n"));
+ freq_info_help();
+ return -EINVAL;
+ case 'h':
+ freq_info_help();
+ return EXIT_SUCCESS;
+ case 'o':
+ proc_cpufreq_output();
+ return EXIT_SUCCESS;
+ }
+
+ for (cpu = bitmask_first(cpus_chosen);
+ cpu <= bitmask_last(cpus_chosen); cpu++) {
+
+ if (!bitmask_isbitset(cpus_chosen, cpu))
+ continue;
+ if (cpufreq_cpu_exists(cpu)) {
+ printf(_("couldn't analyze CPU %d as it doesn't seem to be present\n"), cpu);
+ continue;
+ }
+ printf(_("analyzing CPU %d:\n"), cpu);
+
+ switch (output_param) {
+ case 'b':
+ get_boost_mode(cpu);
+ break;
+ case 'e':
+ debug_output_one(cpu);
+ break;
+ case 'a':
+ ret = get_affected_cpus(cpu);
+ break;
+ case 'r':
+ ret = get_related_cpus(cpu);
+ break;
+ case 'g':
+ ret = get_available_governors(cpu);
+ break;
+ case 'p':
+ ret = get_policy(cpu);
+ break;
+ case 'd':
+ ret = get_driver(cpu);
+ break;
+ case 'l':
+ ret = get_hardware_limits(cpu);
+ break;
+ case 'w':
+ ret = get_freq_hardware(cpu, human);
+ break;
+ case 'f':
+ ret = get_freq_kernel(cpu, human);
+ break;
+ case 's':
+ ret = get_freq_stats(cpu, human);
+ break;
+ case 'y':
+ ret = get_latency(cpu, human);
+ break;
+ }
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
new file mode 100644
index 00000000000..5f783622bf3
--- /dev/null
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -0,0 +1,358 @@
+/*
+ * (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ */
+
+
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <string.h>
+#include <ctype.h>
+
+#include <getopt.h>
+
+#include "cpufreq.h"
+#include "helpers/helpers.h"
+
+#define NORM_FREQ_LEN 32
+
+void freq_set_help(void)
+{
+ printf(_("Usage: cpupower frequency-set [options]\n"));
+ printf(_("Options:\n"));
+ printf(_(" -d FREQ, --min FREQ new minimum CPU frequency the governor may select\n"));
+ printf(_(" -u FREQ, --max FREQ new maximum CPU frequency the governor may select\n"));
+ printf(_(" -g GOV, --governor GOV new cpufreq governor\n"));
+ printf(_(" -f FREQ, --freq FREQ specific frequency to be set. Requires userspace\n"
+ " governor to be available and loaded\n"));
+ printf(_(" -r, --related Switches all hardware-related CPUs\n"));
+ printf(_(" -h, --help Prints out this screen\n"));
+ printf("\n");
+ printf(_("Notes:\n"
+ "1. Omitting the -c or --cpu argument is equivalent to setting it to \"all\"\n"));
+ printf(_("2. The -f FREQ, --freq FREQ parameter cannot be combined with any other parameter\n"
+ " except the -c CPU, --cpu CPU parameter\n"
+ "3. FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz\n"
+ " by postfixing the value with the wanted unit name, without any space\n"
+ " (FREQuency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n"));
+
+}
+
+static struct option set_opts[] = {
+ { .name = "min", .has_arg = required_argument, .flag = NULL, .val = 'd'},
+ { .name = "max", .has_arg = required_argument, .flag = NULL, .val = 'u'},
+ { .name = "governor", .has_arg = required_argument, .flag = NULL, .val = 'g'},
+ { .name = "freq", .has_arg = required_argument, .flag = NULL, .val = 'f'},
+ { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'},
+ { .name = "related", .has_arg = no_argument, .flag = NULL, .val='r'},
+ { },
+};
+
+static void print_error(void)
+{
+ printf(_("Error setting new values. Common errors:\n"
+ "- Do you have proper administration rights? (super-user?)\n"
+ "- Is the governor you requested available and modprobed?\n"
+ "- Trying to set an invalid policy?\n"
+ "- Trying to set a specific frequency, but userspace governor is not available,\n"
+ " for example because of hardware which cannot be set to a specific frequency\n"
+ " or because the userspace governor isn't loaded?\n"));
+};
+
+struct freq_units {
+ char *str_unit;
+ int power_of_ten;
+};
+
+const struct freq_units def_units[] = {
+ {"hz", -3},
+ {"khz", 0}, /* default */
+ {"mhz", 3},
+ {"ghz", 6},
+ {"thz", 9},
+ {NULL, 0}
+};
+
+static void print_unknown_arg(void)
+{
+ printf(_("invalid or unknown argument\n"));
+ freq_set_help();
+}
+
+static unsigned long string_to_frequency(const char *str)
+{
+ char normalized[NORM_FREQ_LEN];
+ const struct freq_units *unit;
+ const char *scan;
+ char *end;
+ unsigned long freq;
+ int power = 0, match_count = 0, i, cp, pad;
+
+ while (*str == '0')
+ str++;
+
+ for (scan = str; isdigit(*scan) || *scan == '.'; scan++) {
+ if (*scan == '.' && match_count == 0)
+ match_count = 1;
+ else if (*scan == '.' && match_count == 1)
+ return 0;
+ }
+
+ if (*scan) {
+ match_count = 0;
+ for (unit = def_units; unit->str_unit; unit++) {
+ for (i = 0;
+ scan[i] && tolower(scan[i]) == unit->str_unit[i];
+ ++i)
+ continue;
+ if (scan[i])
+ continue;
+ match_count++;
+ power = unit->power_of_ten;
+ }
+ if (match_count != 1)
+ return 0;
+ }
+
+ /* count the number of digits to be copied */
+ for (cp = 0; isdigit(str[cp]); cp++)
+ continue;
+
+ if (str[cp] == '.') {
+ while (power > -1 && isdigit(str[cp+1]))
+ cp++, power--;
+ }
+ if (power >= -1) /* not enough => pad */
+ pad = power + 1;
+	else			/* too much => strip */
+ pad = 0, cp += power + 1;
+ /* check bounds */
+ if (cp <= 0 || cp + pad > NORM_FREQ_LEN - 1)
+ return 0;
+
+ /* copy digits */
+ for (i = 0; i < cp; i++, str++) {
+ if (*str == '.')
+ str++;
+ normalized[i] = *str;
+ }
+ /* and pad */
+ for (; i < cp + pad; i++)
+ normalized[i] = '0';
+
+ /* round up, down ? */
+ match_count = (normalized[i-1] >= '5');
+ /* and drop the decimal part */
+ normalized[i-1] = 0; /* cp > 0 && pad >= 0 ==> i > 0 */
+
+ /* final conversion (and applying rounding) */
+ errno = 0;
+ freq = strtoul(normalized, &end, 10);
+ if (errno)
+ return 0;
+ else {
+ if (match_count && freq != ULONG_MAX)
+ freq++;
+ return freq;
+ }
+}
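+
+/*
+ * Worked examples for string_to_frequency() above (the return value is in
+ * kHz, the default unit; inputs chosen for illustration only):
+ *
+ *   "1.5GHz"    -> 1500000
+ *   "800MHz"    ->  800000
+ *   "1000"      ->    1000   (no unit postfix means kHz)
+ *   "1234567hz" ->    1235   (1234.567 kHz, rounded up)
+ *   "1.5.0GHz"  ->       0   (rejected: two decimal points)
+ */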
+
+static int do_new_policy(unsigned int cpu, struct cpufreq_policy *new_pol)
+{
+ struct cpufreq_policy *cur_pol = cpufreq_get_policy(cpu);
+ int ret;
+
+ if (!cur_pol) {
+ printf(_("wrong, unknown or unhandled CPU?\n"));
+ return -EINVAL;
+ }
+
+ if (!new_pol->min)
+ new_pol->min = cur_pol->min;
+
+ if (!new_pol->max)
+ new_pol->max = cur_pol->max;
+
+ if (!new_pol->governor)
+ new_pol->governor = cur_pol->governor;
+
+ ret = cpufreq_set_policy(cpu, new_pol);
+
+ cpufreq_put_policy(cur_pol);
+
+ return ret;
+}
+
+
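+/*
+ * The "pc" argument below is the number of policy parameters (-d/--min,
+ * -u/--max, -g/--governor) seen on the command line:
+ *   pc == 0: only -f/--freq was given, so a specific frequency is set;
+ *   pc == 1: exactly one policy value changes, so a single
+ *            cpufreq_modify_policy_*() call is enough ("fast path");
+ *   pc >= 2: missing fields are taken from the current policy and the
+ *            whole policy is written back via do_new_policy().
+ */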
+static int do_one_cpu(unsigned int cpu, struct cpufreq_policy *new_pol,
+ unsigned long freq, unsigned int pc)
+{
+ switch (pc) {
+ case 0:
+ return cpufreq_set_frequency(cpu, freq);
+
+ case 1:
+ /* if only one value of a policy is to be changed, we can
+ * use a "fast path".
+ */
+ if (new_pol->min)
+ return cpufreq_modify_policy_min(cpu, new_pol->min);
+ else if (new_pol->max)
+ return cpufreq_modify_policy_max(cpu, new_pol->max);
+ else if (new_pol->governor)
+ return cpufreq_modify_policy_governor(cpu,
+ new_pol->governor);
+
+ default:
+ /* slow path */
+ return do_new_policy(cpu, new_pol);
+ }
+}
+
+int cmd_freq_set(int argc, char **argv)
+{
+ extern char *optarg;
+ extern int optind, opterr, optopt;
+ int ret = 0, cont = 1;
+ int double_parm = 0, related = 0, policychange = 0;
+ unsigned long freq = 0;
+ char gov[20];
+ unsigned int cpu;
+
+ struct cpufreq_policy new_pol = {
+ .min = 0,
+ .max = 0,
+ .governor = NULL,
+ };
+
+ /* parameter parsing */
+ do {
+ ret = getopt_long(argc, argv, "d:u:g:f:hr", set_opts, NULL);
+ switch (ret) {
+ case '?':
+ print_unknown_arg();
+ return -EINVAL;
+ case 'h':
+ freq_set_help();
+ return 0;
+ case -1:
+ cont = 0;
+ break;
+ case 'r':
+ if (related)
+ double_parm++;
+ related++;
+ break;
+ case 'd':
+ if (new_pol.min)
+ double_parm++;
+ policychange++;
+ new_pol.min = string_to_frequency(optarg);
+ if (new_pol.min == 0) {
+ print_unknown_arg();
+ return -EINVAL;
+ }
+ break;
+ case 'u':
+ if (new_pol.max)
+ double_parm++;
+ policychange++;
+ new_pol.max = string_to_frequency(optarg);
+ if (new_pol.max == 0) {
+ print_unknown_arg();
+ return -EINVAL;
+ }
+ break;
+ case 'f':
+ if (freq)
+ double_parm++;
+ freq = string_to_frequency(optarg);
+ if (freq == 0) {
+ print_unknown_arg();
+ return -EINVAL;
+ }
+ break;
+ case 'g':
+ if (new_pol.governor)
+ double_parm++;
+ policychange++;
+ if ((strlen(optarg) < 3) || (strlen(optarg) > 18)) {
+ print_unknown_arg();
+ return -EINVAL;
+ }
+ if ((sscanf(optarg, "%s", gov)) != 1) {
+ print_unknown_arg();
+ return -EINVAL;
+ }
+ new_pol.governor = gov;
+ break;
+ }
+ } while (cont);
+
+ /* parameter checking */
+ if (double_parm) {
+ printf("the same parameter was passed more than once\n");
+ return -EINVAL;
+ }
+
+ if (freq && policychange) {
+ printf(_("the -f/--freq parameter cannot be combined with -d/--min, -u/--max or\n"
+ "-g/--governor parameters\n"));
+ return -EINVAL;
+ }
+
+ if (!freq && !policychange) {
+ printf(_("At least one parameter out of -f/--freq, -d/--min, -u/--max, and\n"
+ "-g/--governor must be passed\n"));
+ return -EINVAL;
+ }
+
+ /* Default is: set all CPUs */
+ if (bitmask_isallclear(cpus_chosen))
+ bitmask_setall(cpus_chosen);
+
+ /* Also set frequency settings for related CPUs if -r is passed */
+ if (related) {
+ for (cpu = bitmask_first(cpus_chosen);
+ cpu <= bitmask_last(cpus_chosen); cpu++) {
+ struct cpufreq_affected_cpus *cpus;
+
+ if (!bitmask_isbitset(cpus_chosen, cpu) ||
+ cpufreq_cpu_exists(cpu))
+ continue;
+
+ cpus = cpufreq_get_related_cpus(cpu);
+ if (!cpus)
+ break;
+ while (cpus->next) {
+ bitmask_setbit(cpus_chosen, cpus->cpu);
+ cpus = cpus->next;
+ }
+ cpufreq_put_related_cpus(cpus);
+ }
+ }
+
+
+ /* loop over CPUs */
+ for (cpu = bitmask_first(cpus_chosen);
+ cpu <= bitmask_last(cpus_chosen); cpu++) {
+
+ if (!bitmask_isbitset(cpus_chosen, cpu) ||
+ cpufreq_cpu_exists(cpu))
+ continue;
+
+ printf(_("Setting cpu: %d\n"), cpu);
+ ret = do_one_cpu(cpu, &new_pol, freq, policychange);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ print_error();
+
+ return ret;
+}
diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c
new file mode 100644
index 00000000000..70da3574f1e
--- /dev/null
+++ b/tools/power/cpupower/utils/cpuidle-info.c
@@ -0,0 +1,244 @@
+/*
+ * (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
+ * (C) 2010 Thomas Renninger <trenn@suse.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ */
+
+
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <cpufreq.h>
+
+#include "helpers/helpers.h"
+#include "helpers/sysfs.h"
+#include "helpers/bitmask.h"
+
+#define LINE_LEN 10
+
+static void cpuidle_cpu_output(unsigned int cpu, int verbose)
+{
+ int idlestates, idlestate;
+ char *tmp;
+
+ printf(_ ("Analyzing CPU %d:\n"), cpu);
+
+ idlestates = sysfs_get_idlestate_count(cpu);
+ if (idlestates == 0) {
+ printf(_("CPU %u: No idle states\n"), cpu);
+ return;
+ } else if (idlestates <= 0) {
+ printf(_("CPU %u: Can't read idle state info\n"), cpu);
+ return;
+ }
+ tmp = sysfs_get_idlestate_name(cpu, idlestates - 1);
+ if (!tmp) {
+ printf(_("Could not determine max idle state %u\n"),
+ idlestates - 1);
+ return;
+ }
+
+ printf(_("Number of idle states: %d\n"), idlestates);
+
+ printf(_("Available idle states:"));
+ for (idlestate = 1; idlestate < idlestates; idlestate++) {
+ tmp = sysfs_get_idlestate_name(cpu, idlestate);
+ if (!tmp)
+ continue;
+ printf(" %s", tmp);
+ free(tmp);
+ }
+ printf("\n");
+
+ if (!verbose)
+ return;
+
+ for (idlestate = 1; idlestate < idlestates; idlestate++) {
+ tmp = sysfs_get_idlestate_name(cpu, idlestate);
+ if (!tmp)
+ continue;
+ printf("%s:\n", tmp);
+ free(tmp);
+
+ tmp = sysfs_get_idlestate_desc(cpu, idlestate);
+ if (!tmp)
+ continue;
+ printf(_("Flags/Description: %s\n"), tmp);
+ free(tmp);
+
+ printf(_("Latency: %lu\n"),
+ sysfs_get_idlestate_latency(cpu, idlestate));
+ printf(_("Usage: %lu\n"),
+ sysfs_get_idlestate_usage(cpu, idlestate));
+ printf(_("Duration: %llu\n"),
+ sysfs_get_idlestate_time(cpu, idlestate));
+ }
+ printf("\n");
+}
+
+static void cpuidle_general_output(void)
+{
+ char *tmp;
+
+ tmp = sysfs_get_cpuidle_driver();
+ if (!tmp) {
+ printf(_("Could not determine cpuidle driver\n"));
+ return;
+ }
+
+ printf(_("CPUidle driver: %s\n"), tmp);
+ free(tmp);
+
+ tmp = sysfs_get_cpuidle_governor();
+ if (!tmp) {
+ printf(_("Could not determine cpuidle governor\n"));
+ return;
+ }
+
+ printf(_("CPUidle governor: %s\n"), tmp);
+ free(tmp);
+}
+
+static void proc_cpuidle_cpu_output(unsigned int cpu)
+{
+ long max_allowed_cstate = 2000000000;
+ int cstates, cstate;
+
+ cstates = sysfs_get_idlestate_count(cpu);
+ if (cstates == 0) {
+ /*
+ * Go on and print same useless info as you'd see with
+ * cat /proc/acpi/processor/../power
+ * printf(_("CPU %u: No C-states available\n"), cpu);
+ * return;
+ */
+ } else if (cstates <= 0) {
+ printf(_("CPU %u: Can't read C-state info\n"), cpu);
+ return;
+ }
+ /* printf("Cstates: %d\n", cstates); */
+
+ printf(_("active state: C0\n"));
+ printf(_("max_cstate: C%u\n"), cstates-1);
+ printf(_("maximum allowed latency: %lu usec\n"), max_allowed_cstate);
+ printf(_("states:\t\n"));
+ for (cstate = 1; cstate < cstates; cstate++) {
+ printf(_(" C%d: "
+ "type[C%d] "), cstate, cstate);
+ printf(_("promotion[--] demotion[--] "));
+ printf(_("latency[%03lu] "),
+ sysfs_get_idlestate_latency(cpu, cstate));
+ printf(_("usage[%08lu] "),
+ sysfs_get_idlestate_usage(cpu, cstate));
+ printf(_("duration[%020Lu] \n"),
+ sysfs_get_idlestate_time(cpu, cstate));
+ }
+}
+
+/* --freq / -f */
+
+void idle_info_help(void)
+{
+ printf(_ ("Usage: cpupower idleinfo [options]\n"));
+ printf(_ ("Options:\n"));
+ printf(_ (" -s, --silent Only show general C-state information\n"));
+ printf(_ (" -o, --proc Prints out information like provided by the /proc/acpi/processor/*/power\n"
+ " interface in older kernels\n"));
+ printf(_ (" -h, --help Prints out this screen\n"));
+
+ printf("\n");
+}
+
+static struct option info_opts[] = {
+ { .name = "silent", .has_arg = no_argument, .flag = NULL, .val = 's'},
+ { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'},
+ { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'},
+ { },
+};
+
+static inline void cpuidle_exit(int fail)
+{
+ idle_info_help();
+ exit(EXIT_FAILURE);
+}
+
+int cmd_idle_info(int argc, char **argv)
+{
+ extern char *optarg;
+ extern int optind, opterr, optopt;
+ int ret = 0, cont = 1, output_param = 0, verbose = 1;
+ unsigned int cpu = 0;
+
+ do {
+ ret = getopt_long(argc, argv, "hos", info_opts, NULL);
+ if (ret == -1)
+ break;
+ switch (ret) {
+ case '?':
+ output_param = '?';
+ cont = 0;
+ break;
+ case 'h':
+ output_param = 'h';
+ cont = 0;
+ break;
+ case 's':
+ verbose = 0;
+ break;
+ case -1:
+ cont = 0;
+ break;
+ case 'o':
+ if (output_param) {
+ output_param = -1;
+ cont = 0;
+ break;
+ }
+ output_param = ret;
+ break;
+ }
+ } while (cont);
+
+ switch (output_param) {
+ case -1:
+ printf(_("You can't specify more than one "
+ "output-specific argument\n"));
+ cpuidle_exit(EXIT_FAILURE);
+ case '?':
+ printf(_("invalid or unknown argument\n"));
+ cpuidle_exit(EXIT_FAILURE);
+ case 'h':
+ cpuidle_exit(EXIT_SUCCESS);
+ }
+
+ /* Default is: show output of CPU 0 only */
+ if (bitmask_isallclear(cpus_chosen))
+ bitmask_setbit(cpus_chosen, 0);
+
+ if (output_param == 0)
+ cpuidle_general_output();
+
+ for (cpu = bitmask_first(cpus_chosen);
+ cpu <= bitmask_last(cpus_chosen); cpu++) {
+
+ if (!bitmask_isbitset(cpus_chosen, cpu) ||
+ cpufreq_cpu_exists(cpu))
+ continue;
+
+ switch (output_param) {
+
+ case 'o':
+ proc_cpuidle_cpu_output(cpu);
+ break;
+ case 0:
+ printf("\n");
+ cpuidle_cpu_output(cpu, verbose);
+ break;
+ }
+ }
+ return EXIT_SUCCESS;
+}
diff --git a/tools/power/cpupower/utils/cpupower-info.c b/tools/power/cpupower/utils/cpupower-info.c
new file mode 100644
index 00000000000..85253cb7600
--- /dev/null
+++ b/tools/power/cpupower/utils/cpupower-info.c
@@ -0,0 +1,153 @@
+/*
+ * (C) 2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ */
+
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <getopt.h>
+
+#include <cpufreq.h>
+#include "helpers/helpers.h"
+#include "helpers/sysfs.h"
+
+void info_help(void)
+{
+ printf(_("Usage: cpupower info [ -b ] [ -m ] [ -s ]\n"));
+ printf(_("Options:\n"));
+ printf(_(" -b, --perf-bias Gets CPU's power vs performance policy on some\n"
+ " Intel models [0-15], see manpage for details\n"));
+ printf(_(" -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"));
+ printf(_(" -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"));
+ printf(_(" -h, --help Prints out this screen\n"));
+ printf(_("\nPassing no option will show all info, by default only on core 0\n"));
+ printf("\n");
+}
+
+static struct option set_opts[] = {
+ { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
+ { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
+ { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
+ { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'},
+ { },
+};
+
+static void print_wrong_arg_exit(void)
+{
+ printf(_("invalid or unknown argument\n"));
+ info_help();
+ exit(EXIT_FAILURE);
+}
+
+int cmd_info(int argc, char **argv)
+{
+ extern char *optarg;
+ extern int optind, opterr, optopt;
+ unsigned int cpu;
+
+ union {
+ struct {
+ int sched_mc:1;
+ int sched_smt:1;
+ int perf_bias:1;
+ };
+ int params;
+ } params = {};
+ int ret = 0;
+
+ setlocale(LC_ALL, "");
+ textdomain(PACKAGE);
+
+ /* parameter parsing */
+ while ((ret = getopt_long(argc, argv, "msbh", set_opts, NULL)) != -1) {
+ switch (ret) {
+ case 'h':
+ info_help();
+ return 0;
+ case 'b':
+ if (params.perf_bias)
+ print_wrong_arg_exit();
+ params.perf_bias = 1;
+ break;
+ case 'm':
+ if (params.sched_mc)
+ print_wrong_arg_exit();
+ params.sched_mc = 1;
+ break;
+ case 's':
+ if (params.sched_smt)
+ print_wrong_arg_exit();
+ params.sched_smt = 1;
+ break;
+ default:
+ print_wrong_arg_exit();
+ }
+ };
+
+ if (!params.params)
+ params.params = 0x7;
+
+ /* Default is: show output of CPU 0 only */
+ if (bitmask_isallclear(cpus_chosen))
+ bitmask_setbit(cpus_chosen, 0);
+
+ if (params.sched_mc) {
+ ret = sysfs_get_sched("mc");
+ printf(_("System's multi core scheduler setting: "));
+ if (ret < 0)
+ /* if sysfs file is missing it's: errno == ENOENT */
+ printf(_("not supported\n"));
+ else
+ printf("%d\n", ret);
+ }
+ if (params.sched_smt) {
+ ret = sysfs_get_sched("smt");
+ printf(_("System's thread sibling scheduler setting: "));
+ if (ret < 0)
+ /* if sysfs file is missing it's: errno == ENOENT */
+ printf(_("not supported\n"));
+ else
+ printf("%d\n", ret);
+ }
+
+ /* Add more per cpu options here */
+ if (!params.perf_bias)
+ return ret;
+
+ if (params.perf_bias) {
+ if (!run_as_root) {
+ params.perf_bias = 0;
+ printf(_("Intel's performance bias setting needs root privileges\n"));
+ } else if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_PERF_BIAS)) {
+ printf(_("System does not support Intel's performance"
+ " bias setting\n"));
+ params.perf_bias = 0;
+ }
+ }
+
+ /* loop over CPUs */
+ for (cpu = bitmask_first(cpus_chosen);
+ cpu <= bitmask_last(cpus_chosen); cpu++) {
+
+ if (!bitmask_isbitset(cpus_chosen, cpu) ||
+ cpufreq_cpu_exists(cpu))
+ continue;
+
+ printf(_("analyzing CPU %d:\n"), cpu);
+
+ if (params.perf_bias) {
+ ret = msr_intel_get_perf_bias(cpu);
+ if (ret < 0) {
+ printf(_("Could not read perf-bias value\n"));
+ break;
+ } else
+ printf(_("perf-bias: %d\n"), ret);
+ }
+ }
+ return ret;
+}
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
new file mode 100644
index 00000000000..bc1b391e46f
--- /dev/null
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -0,0 +1,153 @@
+/*
+ * (C) 2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ */
+
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <getopt.h>
+
+#include <cpufreq.h>
+#include "helpers/helpers.h"
+#include "helpers/sysfs.h"
+#include "helpers/bitmask.h"
+
+void set_help(void)
+{
+ printf(_("Usage: cpupower set [ -b val ] [ -m val ] [ -s val ]\n"));
+ printf(_("Options:\n"));
+ printf(_(" -b, --perf-bias [VAL] Sets CPU's power vs performance policy on some\n"
+ " Intel models [0-15], see manpage for details\n"));
+ printf(_(" -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"));
+ printf(_(" -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler policy.\n"));
+ printf(_(" -h, --help Prints out this screen\n"));
+ printf("\n");
+}
+
+static struct option set_opts[] = {
+ { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
+ { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
+ { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
+ { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'},
+ { },
+};
+
+static void print_wrong_arg_exit(void)
+{
+ printf(_("invalid or unknown argument\n"));
+ set_help();
+ exit(EXIT_FAILURE);
+}
+
+int cmd_set(int argc, char **argv)
+{
+ extern char *optarg;
+ extern int optind, opterr, optopt;
+ unsigned int cpu;
+
+ union {
+ struct {
+ int sched_mc:1;
+ int sched_smt:1;
+ int perf_bias:1;
+ };
+ int params;
+ } params;
+ int sched_mc = 0, sched_smt = 0, perf_bias = 0;
+ int ret = 0;
+
+ setlocale(LC_ALL, "");
+ textdomain(PACKAGE);
+
+ params.params = 0;
+ /* parameter parsing */
+ while ((ret = getopt_long(argc, argv, "m:s:b:h",
+ set_opts, NULL)) != -1) {
+ switch (ret) {
+ case 'h':
+ set_help();
+ return 0;
+ case 'b':
+ if (params.perf_bias)
+ print_wrong_arg_exit();
+ perf_bias = atoi(optarg);
+ if (perf_bias < 0 || perf_bias > 15) {
+ printf(_("--perf-bias param out "
+ "of range [0-%d]\n"), 15);
+ print_wrong_arg_exit();
+ }
+ params.perf_bias = 1;
+ break;
+ case 'm':
+ if (params.sched_mc)
+ print_wrong_arg_exit();
+ sched_mc = atoi(optarg);
+ if (sched_mc < 0 || sched_mc > 2) {
+ printf(_("--sched-mc param out "
+ "of range [0-%d]\n"), 2);
+ print_wrong_arg_exit();
+ }
+ params.sched_mc = 1;
+ break;
+ case 's':
+ if (params.sched_smt)
+ print_wrong_arg_exit();
+ sched_smt = atoi(optarg);
+ if (sched_smt < 0 || sched_smt > 2) {
+ printf(_("--sched-smt param out "
+ "of range [0-%d]\n"), 2);
+ print_wrong_arg_exit();
+ }
+ params.sched_smt = 1;
+ break;
+ default:
+ print_wrong_arg_exit();
+ }
+ };
+
+ if (!params.params) {
+ set_help();
+ return -EINVAL;
+ }
+
+ if (params.sched_mc) {
+ ret = sysfs_set_sched("mc", sched_mc);
+ if (ret)
+ fprintf(stderr, _("Error setting sched-mc %s\n"),
+ (ret == -ENODEV) ? "not supported" : "");
+ }
+ if (params.sched_smt) {
+ ret = sysfs_set_sched("smt", sched_smt);
+ if (ret)
+ fprintf(stderr, _("Error setting sched-smt %s\n"),
+ (ret == -ENODEV) ? "not supported" : "");
+ }
+
+ /* Default is: set all CPUs */
+ if (bitmask_isallclear(cpus_chosen))
+ bitmask_setall(cpus_chosen);
+
+ /* loop over CPUs */
+ for (cpu = bitmask_first(cpus_chosen);
+ cpu <= bitmask_last(cpus_chosen); cpu++) {
+
+ if (!bitmask_isbitset(cpus_chosen, cpu) ||
+ cpufreq_cpu_exists(cpu))
+ continue;
+
+ if (params.perf_bias) {
+ ret = msr_intel_set_perf_bias(cpu, perf_bias);
+ if (ret) {
+ fprintf(stderr, _("Error setting perf-bias "
+ "value on CPU %d\n"), cpu);
+ break;
+ }
+ }
+ }
+ return ret;
+}
diff --git a/tools/power/cpupower/utils/cpupower.c b/tools/power/cpupower/utils/cpupower.c
new file mode 100644
index 00000000000..5844ae0f786
--- /dev/null
+++ b/tools/power/cpupower/utils/cpupower.c
@@ -0,0 +1,203 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Ideas borrowed from the perf userspace tool (included in Linus'
+ * kernel git repo): subcommand builtins and param parsing.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "builtin.h"
+#include "helpers/helpers.h"
+#include "helpers/bitmask.h"
+
+struct cmd_struct {
+ const char *cmd;
+ int (*main)(int, const char **);
+ void (*usage)(void);
+ int needs_root;
+};
+
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+
+int cmd_help(int argc, const char **argv);
+
+/* Global cpu_info object available for all binaries
+ * Info only retrieved from CPU 0
+ *
+ * Values will be zero/unknown on non X86 archs
+ */
+struct cpupower_cpu_info cpupower_cpu_info;
+int run_as_root;
+/* Affected cpus chosen by -c/--cpu param */
+struct bitmask *cpus_chosen;
+
+#ifdef DEBUG
+int be_verbose;
+#endif
+
+static void print_help(void);
+
+static struct cmd_struct commands[] = {
+ { "frequency-info", cmd_freq_info, freq_info_help, 0 },
+ { "frequency-set", cmd_freq_set, freq_set_help, 1 },
+ { "idle-info", cmd_idle_info, idle_info_help, 0 },
+ { "set", cmd_set, set_help, 1 },
+ { "info", cmd_info, info_help, 0 },
+ { "monitor", cmd_monitor, monitor_help, 0 },
+ { "help", cmd_help, print_help, 0 },
+ /* { "bench", cmd_bench, NULL, 1 }, */
+};
+
+int cmd_help(int argc, const char **argv)
+{
+ unsigned int i;
+
+ if (argc > 1) {
+ for (i = 0; i < ARRAY_SIZE(commands); i++) {
+ struct cmd_struct *p = commands + i;
+ if (strcmp(p->cmd, argv[1]))
+ continue;
+ if (p->usage) {
+ p->usage();
+ return EXIT_SUCCESS;
+ }
+ }
+ }
+ print_help();
+ if (argc == 1)
+ return EXIT_SUCCESS; /* cpupower help */
+ return EXIT_FAILURE;
+}
+
+static void print_help(void)
+{
+ unsigned int i;
+
+#ifdef DEBUG
+ printf(_("cpupower [ -d ][ -c cpulist ] subcommand [ARGS]\n"));
+ printf(_(" -d, --debug May increase output (stderr) on some subcommands\n"));
+#else
+ printf(_("cpupower [ -c cpulist ] subcommand [ARGS]\n"));
+#endif
+ printf(_("cpupower --version\n"));
+ printf(_("Supported subcommands are:\n"));
+ for (i = 0; i < ARRAY_SIZE(commands); i++)
+ printf("\t%s\n", commands[i].cmd);
+ printf(_("\nSome subcommands can make use of the -c cpulist option.\n"));
+ printf(_("Look at the general cpupower manpage how to use it\n"));
+ printf(_("and read up the subcommand's manpage whether it is supported.\n"));
+ printf(_("\nUse cpupower help subcommand for getting help for above subcommands.\n"));
+}
+
+static void print_version(void)
+{
+ printf(PACKAGE " " VERSION "\n");
+ printf(_("Report errors and bugs to %s, please.\n"), PACKAGE_BUGREPORT);
+}
+
+static void handle_options(int *argc, const char ***argv)
+{
+ int ret, x, new_argc = 0;
+
+ if (*argc < 1)
+ return;
+
+ for (x = 0; x < *argc && ((*argv)[x])[0] == '-'; x++) {
+ const char *param = (*argv)[x];
+ if (!strcmp(param, "-h") || !strcmp(param, "--help")) {
+ print_help();
+ exit(EXIT_SUCCESS);
+ } else if (!strcmp(param, "-c") || !strcmp(param, "--cpu")) {
+ if (*argc < 2) {
+ print_help();
+ exit(EXIT_FAILURE);
+ }
+ if (!strcmp((*argv)[x+1], "all"))
+ bitmask_setall(cpus_chosen);
+ else {
+ ret = bitmask_parselist(
+ (*argv)[x+1], cpus_chosen);
+ if (ret < 0) {
+ fprintf(stderr, _("Error parsing cpu "
+ "list\n"));
+ exit(EXIT_FAILURE);
+ }
+ }
+ x += 1;
+ /* Cut out param: cpupower -c 1 info -> cpupower info */
+ new_argc += 2;
+ continue;
+ } else if (!strcmp(param, "-v") ||
+ !strcmp(param, "--version")) {
+ print_version();
+ exit(EXIT_SUCCESS);
+#ifdef DEBUG
+ } else if (!strcmp(param, "-d") || !strcmp(param, "--debug")) {
+ be_verbose = 1;
+ new_argc++;
+ continue;
+#endif
+ } else {
+ fprintf(stderr, "Unknown option: %s\n", param);
+ print_help();
+ exit(EXIT_FAILURE);
+ }
+ }
+ *argc -= new_argc;
+ *argv += new_argc;
+}
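+
+/*
+ * Example of the argv rewriting handle_options() performs, for the
+ * (made-up) invocation "cpupower -c 1,3 frequency-info --hwlimits":
+ *
+ *   before: argc = 4, argv = { "-c", "1,3", "frequency-info", "--hwlimits" }
+ *   after:  argc = 2, argv = { "frequency-info", "--hwlimits" }
+ *
+ * main() has already stripped the program name at this point, and bits 1
+ * and 3 of cpus_chosen are now set.
+ */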
+
+int main(int argc, const char *argv[])
+{
+ const char *cmd;
+ unsigned int i, ret;
+
+ cpus_chosen = bitmask_alloc(sysconf(_SC_NPROCESSORS_CONF));
+
+ argc--;
+ argv += 1;
+
+ handle_options(&argc, &argv);
+
+ cmd = argv[0];
+
+ if (argc < 1) {
+ print_help();
+ return EXIT_FAILURE;
+ }
+
+ setlocale(LC_ALL, "");
+ textdomain(PACKAGE);
+
+	/* Turn "cpupower cmd --help" into "cpupower help cmd" */
+ if (argc > 1 && !strcmp(argv[1], "--help")) {
+ argv[1] = argv[0];
+ argv[0] = cmd = "help";
+ }
+
+ get_cpu_info(0, &cpupower_cpu_info);
+ run_as_root = !getuid();
+
+ for (i = 0; i < ARRAY_SIZE(commands); i++) {
+ struct cmd_struct *p = commands + i;
+ if (strcmp(p->cmd, cmd))
+ continue;
+ if (!run_as_root && p->needs_root) {
+ fprintf(stderr, _("Subcommand %s needs root "
+ "privileges\n"), cmd);
+ return EXIT_FAILURE;
+ }
+ ret = p->main(argc, argv);
+ if (cpus_chosen)
+ bitmask_free(cpus_chosen);
+ return ret;
+ }
+ print_help();
+ return EXIT_FAILURE;
+}
diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
new file mode 100644
index 00000000000..87d5605bdda
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/amd.c
@@ -0,0 +1,137 @@
+#if defined(__i386__) || defined(__x86_64__)
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdint.h>
+
+#include <pci/pci.h>
+
+#include "helpers/helpers.h"
+
+#define MSR_AMD_PSTATE_STATUS 0xc0010063
+#define MSR_AMD_PSTATE 0xc0010064
+#define MSR_AMD_PSTATE_LIMIT 0xc0010061
+
+union msr_pstate {
+ struct {
+ unsigned fid:6;
+ unsigned did:3;
+ unsigned vid:7;
+ unsigned res1:6;
+ unsigned nbdid:1;
+ unsigned res2:2;
+ unsigned nbvid:7;
+ unsigned iddval:8;
+ unsigned idddiv:2;
+ unsigned res3:21;
+ unsigned en:1;
+ } bits;
+ unsigned long long val;
+};
+
+static int get_did(int family, union msr_pstate pstate)
+{
+ int t;
+
+ if (family == 0x12)
+ t = pstate.val & 0xf;
+ else
+ t = pstate.bits.did;
+
+ return t;
+}
+
+static int get_cof(int family, union msr_pstate pstate)
+{
+ int t;
+ int fid, did;
+
+ did = get_did(family, pstate);
+
+ t = 0x10;
+ fid = pstate.bits.fid;
+ if (family == 0x11)
+ t = 0x8;
+
+ return (100 * (fid + t)) >> did;
+}
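+
+/*
+ * Worked examples for get_cof() above (the result is the core frequency in
+ * MHz; the fid/did values are made up for illustration):
+ *
+ *   family 0x10, fid = 0x18, did = 1: (100 * (0x18 + 0x10)) >> 1 = 2000 MHz
+ *   family 0x11, fid = 0x10, did = 0: (100 * (0x10 + 0x08)) >> 0 = 2400 MHz
+ */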
+
+/* Needs:
+ * cpu -> the cpu that gets evaluated
+ * cpu_family -> The cpu's family (0x10, 0x12,...)
+ * boost_states -> how many boost states the machine supports
+ *
+ * Fills up:
+ * pstates -> a pointer to an array of size MAX_HW_PSTATES
+ * must be initialized with zeros.
+ * All available HW pstates (including boost states)
+ * no -> the number of pstates the array above was filled with
+ *
+ * returns zero on success, -1 on failure
+ */
+int decode_pstates(unsigned int cpu, unsigned int cpu_family,
+ int boost_states, unsigned long *pstates, int *no)
+{
+ int i, psmax, pscur;
+ union msr_pstate pstate;
+ unsigned long long val;
+
+ /* Only read out frequencies from HW when CPU might be boostable
+ to keep the code as short and clean as possible.
+ Otherwise frequencies are exported via ACPI tables.
+ */
+ if (cpu_family < 0x10 || cpu_family == 0x14)
+ return -1;
+
+ if (read_msr(cpu, MSR_AMD_PSTATE_LIMIT, &val))
+ return -1;
+
+ psmax = (val >> 4) & 0x7;
+
+ if (read_msr(cpu, MSR_AMD_PSTATE_STATUS, &val))
+ return -1;
+
+ pscur = val & 0x7;
+
+ pscur += boost_states;
+ psmax += boost_states;
+ for (i = 0; i <= psmax; i++) {
+ if (i >= MAX_HW_PSTATES) {
+ fprintf(stderr, "HW pstates [%d] exceeding max [%d]\n",
+ psmax, MAX_HW_PSTATES);
+ return -1;
+ }
+ if (read_msr(cpu, MSR_AMD_PSTATE + i, &pstate.val))
+ return -1;
+ pstates[i] = get_cof(cpu_family, pstate);
+ }
+ *no = i;
+ return 0;
+}
+
+int amd_pci_get_num_boost_states(int *active, int *states)
+{
+ struct pci_access *pci_acc;
+ int vendor_id = 0x1022;
+ int boost_dev_ids[4] = {0x1204, 0x1604, 0x1704, 0};
+ struct pci_dev *device;
+ uint8_t val = 0;
+
+ *active = *states = 0;
+
+ device = pci_acc_init(&pci_acc, vendor_id, boost_dev_ids);
+
+ if (device == NULL)
+ return -ENODEV;
+
+ val = pci_read_byte(device, 0x15c);
+ if (val & 3)
+ *active = 1;
+ else
+ *active = 0;
+ *states = (val >> 2) & 7;
+
+ pci_cleanup(pci_acc);
+ return 0;
+}
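+
+/*
+ * Illustrative decode of the byte read from config offset 0x15c above,
+ * using a made-up value: val = 0x0b -> (val & 3) != 0, so *active = 1;
+ * (0x0b >> 2) & 7 = 2, so *states = 2 boost states.
+ */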
+#endif /* defined(__i386__) || defined(__x86_64__) */
diff --git a/tools/power/cpupower/utils/helpers/bitmask.c b/tools/power/cpupower/utils/helpers/bitmask.c
new file mode 100644
index 00000000000..5c074c60f90
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/bitmask.c
@@ -0,0 +1,292 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <helpers/bitmask.h>
+
+/* How many bits in an unsigned long */
+#define bitsperlong (8 * sizeof(unsigned long))
+
+/* howmany(a,b) : how many elements of size b needed to hold all of a */
+#define howmany(x, y) (((x)+((y)-1))/(y))
+
+/* How many longs in mask of n bits */
+#define longsperbits(n) howmany(n, bitsperlong)
+
+#define max(a, b) ((a) > (b) ? (a) : (b))
+
+/*
+ * Allocate and free `struct bitmask *`
+ */
+
+/* Allocate a new `struct bitmask` with a size of n bits */
+struct bitmask *bitmask_alloc(unsigned int n)
+{
+ struct bitmask *bmp;
+
+ bmp = malloc(sizeof(*bmp));
+ if (bmp == 0)
+ return 0;
+ bmp->size = n;
+ bmp->maskp = calloc(longsperbits(n), sizeof(unsigned long));
+ if (bmp->maskp == 0) {
+ free(bmp);
+ return 0;
+ }
+ return bmp;
+}
+
+/* Free `struct bitmask` */
+void bitmask_free(struct bitmask *bmp)
+{
+ if (bmp == 0)
+ return;
+ free(bmp->maskp);
+ bmp->maskp = (unsigned long *)0xdeadcdef; /* double free tripwire */
+ free(bmp);
+}
+
+/*
+ * The routines _getbit() and _setbit() are the only
+ * routines that actually understand the layout of bmp->maskp[].
+ *
+ * On little endian architectures, this could simply be an array of
+ * bytes. But the kernel layout of bitmasks _is_ visible to userspace
+ * via the sched_(set/get)affinity calls in Linux 2.6, and on big
+ * endian architectures, it is painfully obvious that this is an
+ * array of unsigned longs.
+ */
+
+/* Return the value (0 or 1) of bit n in bitmask bmp */
+static unsigned int _getbit(const struct bitmask *bmp, unsigned int n)
+{
+ if (n < bmp->size)
+ return (bmp->maskp[n/bitsperlong] >> (n % bitsperlong)) & 1;
+ else
+ return 0;
+}
+
+/* Set bit n in bitmask bmp to value v (0 or 1) */
+static void _setbit(struct bitmask *bmp, unsigned int n, unsigned int v)
+{
+ if (n < bmp->size) {
+ if (v)
+ bmp->maskp[n/bitsperlong] |= 1UL << (n % bitsperlong);
+ else
+ bmp->maskp[n/bitsperlong] &=
+ ~(1UL << (n % bitsperlong));
+ }
+}
+
+/*
+ * When parsing bitmask lists, only allow numbers, separated by one
+ * of the allowed next characters.
+ *
+ * The parameter 'sret' is the return from a sscanf "%u%c". It is
+ * -1 if the sscanf input string was empty. It is 0 if the first
+ * character in the sscanf input string was not a decimal number.
+ * It is 1 if the unsigned number matching the "%u" was the end of the
+ * input string. It is 2 if one or more additional characters followed
+ * the matched unsigned number. If it is 2, then 'nextc' is the first
+ * character following the number. The parameter 'ok_next_chars'
+ * is the nul-terminated list of allowed next characters.
+ *
+ * The mask term just scanned was ok if and only if either the numbers
+ * matching the %u were all of the input or if the next character in
+ * the input past the numbers was one of the allowed next characters.
+ */
+static int scan_was_ok(int sret, char nextc, const char *ok_next_chars)
+{
+ return sret == 1 ||
+ (sret == 2 && strchr(ok_next_chars, nextc) != NULL);
+}
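+
+/*
+ * Example of the sret/nextc pairs this helper gets to see while the parser
+ * below walks the (made-up) list "0-7:2,12":
+ *
+ *   sscanf("0-7:2,12", "%u%c", &a, &c) -> sret = 2, c = '-'  (ok for ",-")
+ *   sscanf("7:2,12",   "%u%c", &b, &c) -> sret = 2, c = ':'  (ok for ",:")
+ *   sscanf("2,12",     "%u%c", &s, &c) -> sret = 2, c = ','  (ok for ",")
+ *   sscanf("12",       "%u%c", &a, &c) -> sret = 1            (end of input)
+ */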
+
+static const char *nexttoken(const char *q, int sep)
+{
+ if (q)
+ q = strchr(q, sep);
+ if (q)
+ q++;
+ return q;
+}
+
+/* Set a single bit i in bitmask */
+struct bitmask *bitmask_setbit(struct bitmask *bmp, unsigned int i)
+{
+ _setbit(bmp, i, 1);
+ return bmp;
+}
+
+/* Set all bits in bitmask: bmp = ~0 */
+struct bitmask *bitmask_setall(struct bitmask *bmp)
+{
+ unsigned int i;
+ for (i = 0; i < bmp->size; i++)
+ _setbit(bmp, i, 1);
+ return bmp;
+}
+
+/* Clear all bits in bitmask: bmp = 0 */
+struct bitmask *bitmask_clearall(struct bitmask *bmp)
+{
+ unsigned int i;
+ for (i = 0; i < bmp->size; i++)
+ _setbit(bmp, i, 0);
+ return bmp;
+}
+
+/* True if all bits are clear */
+int bitmask_isallclear(const struct bitmask *bmp)
+{
+ unsigned int i;
+ for (i = 0; i < bmp->size; i++)
+ if (_getbit(bmp, i))
+ return 0;
+ return 1;
+}
+
+/* True if specified bit i is set */
+int bitmask_isbitset(const struct bitmask *bmp, unsigned int i)
+{
+ return _getbit(bmp, i);
+}
+
+/* Number of lowest set bit (min) */
+unsigned int bitmask_first(const struct bitmask *bmp)
+{
+ return bitmask_next(bmp, 0);
+}
+
+/* Number of highest set bit (max) */
+unsigned int bitmask_last(const struct bitmask *bmp)
+{
+ unsigned int i;
+ unsigned int m = bmp->size;
+ for (i = 0; i < bmp->size; i++)
+ if (_getbit(bmp, i))
+ m = i;
+ return m;
+}
+
+/* Number of next set bit at or above given bit i */
+unsigned int bitmask_next(const struct bitmask *bmp, unsigned int i)
+{
+ unsigned int n;
+ for (n = i; n < bmp->size; n++)
+ if (_getbit(bmp, n))
+ break;
+ return n;
+}
+
+/*
+ * Parses a comma-separated list of numbers and ranges of numbers,
+ * with optional ':%u' strides modifying ranges, into provided bitmask.
+ * Some examples of input lists and their equivalent simple list:
+ * Input Equivalent to
+ * 0-3 0,1,2,3
+ * 0-7:2 0,2,4,6
+ * 1,3,5-7 1,3,5,6,7
+ * 0-3:2,8-15:4 0,2,8,12
+ */
+int bitmask_parselist(const char *buf, struct bitmask *bmp)
+{
+ const char *p, *q;
+
+ bitmask_clearall(bmp);
+
+ q = buf;
+ while (p = q, q = nexttoken(q, ','), p) {
+ unsigned int a; /* begin of range */
+ unsigned int b; /* end of range */
+ unsigned int s; /* stride */
+ const char *c1, *c2; /* next tokens after '-' or ',' */
+ char nextc; /* char after sscanf %u match */
+ int sret; /* sscanf return (number of matches) */
+
+ sret = sscanf(p, "%u%c", &a, &nextc);
+ if (!scan_was_ok(sret, nextc, ",-"))
+ goto err;
+ b = a;
+ s = 1;
+ c1 = nexttoken(p, '-');
+ c2 = nexttoken(p, ',');
+ if (c1 != NULL && (c2 == NULL || c1 < c2)) {
+ sret = sscanf(c1, "%u%c", &b, &nextc);
+ if (!scan_was_ok(sret, nextc, ",:"))
+ goto err;
+ c1 = nexttoken(c1, ':');
+ if (c1 != NULL && (c2 == NULL || c1 < c2)) {
+ sret = sscanf(c1, "%u%c", &s, &nextc);
+ if (!scan_was_ok(sret, nextc, ","))
+ goto err;
+ }
+ }
+ if (!(a <= b))
+ goto err;
+ if (b >= bmp->size)
+ goto err;
+ while (a <= b) {
+ _setbit(bmp, a, 1);
+ a += s;
+ }
+ }
+ return 0;
+err:
+ bitmask_clearall(bmp);
+ return -1;
+}
+
+/*
+ * emit(buf, buflen, rbot, rtop, len)
+ *
+ * Helper routine for bitmask_displaylist(). Write decimal number
+ * or range to buf+len, suppressing output past buf+buflen, with optional
+ * comma-prefix. Return len of what would be written to buf, if it
+ * all fit.
+ */
+
+static inline int emit(char *buf, int buflen, int rbot, int rtop, int len)
+{
+ if (len > 0)
+ len += snprintf(buf + len, max(buflen - len, 0), ",");
+ if (rbot == rtop)
+ len += snprintf(buf + len, max(buflen - len, 0), "%d", rbot);
+ else
+ len += snprintf(buf + len, max(buflen - len, 0), "%d-%d",
+ rbot, rtop);
+ return len;
+}
+
+/*
+ * Write decimal list representation of bmp to buf.
+ *
+ * Output format is a comma-separated list of decimal numbers and
+ * ranges. Consecutively set bits are shown as two hyphen-separated
+ * decimal numbers, the smallest and largest bit numbers set in
+ * the range. Output format is compatible with the format
+ * accepted as input by bitmask_parselist().
+ *
+ * The return value is the number of characters which would be
+ * generated for the given input, excluding the trailing '\0', as
+ * per ISO C99.
+ */
+
+int bitmask_displaylist(char *buf, int buflen, const struct bitmask *bmp)
+{
+ int len = 0;
+ /* current bit is 'cur', most recently seen range is [rbot, rtop] */
+ unsigned int cur, rbot, rtop;
+
+ if (buflen > 0)
+ *buf = 0;
+ rbot = cur = bitmask_first(bmp);
+ while (cur < bmp->size) {
+ rtop = cur;
+ cur = bitmask_next(bmp, cur+1);
+ if (cur >= bmp->size || cur > rtop + 1) {
+ len = emit(buf, buflen, rbot, rtop, len);
+ rbot = cur;
+ }
+ }
+ return len;
+}
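+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * patch): parse a cpulist string, then print it back.  "0-3:2,8" sets
+ * bits {0, 2, 8}, which bitmask_displaylist() emits as "0,2,8".
+ *
+ *	struct bitmask *bmp = bitmask_alloc(32);
+ *	char buf[64];
+ *
+ *	if (bmp && bitmask_parselist("0-3:2,8", bmp) == 0) {
+ *		bitmask_displaylist(buf, sizeof(buf), bmp);
+ *		printf("%s\n", buf);		(prints "0,2,8")
+ *	}
+ *	bitmask_free(bmp);
+ */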
diff --git a/tools/power/cpupower/utils/helpers/bitmask.h b/tools/power/cpupower/utils/helpers/bitmask.h
new file mode 100644
index 00000000000..eb289df4105
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/bitmask.h
@@ -0,0 +1,33 @@
+#ifndef __CPUPOWER_BITMASK__
+#define __CPUPOWER_BITMASK__
+
+/* Taken from libbitmask, a project initiated by SGI:
+ * URL: http://oss.sgi.com/projects/cpusets/
+ * Unfortunately it is not widely available, therefore the relevant parts
+ * are copied here.
+ */
+
+struct bitmask {
+ unsigned int size;
+ unsigned long *maskp;
+};
+
+struct bitmask *bitmask_alloc(unsigned int n);
+void bitmask_free(struct bitmask *bmp);
+
+struct bitmask *bitmask_setbit(struct bitmask *bmp, unsigned int i);
+struct bitmask *bitmask_setall(struct bitmask *bmp);
+struct bitmask *bitmask_clearall(struct bitmask *bmp);
+
+unsigned int bitmask_first(const struct bitmask *bmp);
+unsigned int bitmask_next(const struct bitmask *bmp, unsigned int i);
+unsigned int bitmask_last(const struct bitmask *bmp);
+int bitmask_isallclear(const struct bitmask *bmp);
+int bitmask_isbitset(const struct bitmask *bmp, unsigned int i);
+
+int bitmask_parselist(const char *buf, struct bitmask *bmp);
+int bitmask_displaylist(char *buf, int len, const struct bitmask *bmp);
+
+
+
+#endif /*__CPUPOWER_BITMASK__ */
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
new file mode 100644
index 00000000000..906895d21cc
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -0,0 +1,176 @@
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#include "helpers/helpers.h"
+
+static const char *cpu_vendor_table[X86_VENDOR_MAX] = {
+ "Unknown", "GenuineIntel", "AuthenticAMD",
+};
+
+#if defined(__i386__) || defined(__x86_64__)
+
+/* from gcc */
+#include <cpuid.h>
+
+/*
+ * CPUID functions returning a single datum
+ *
+ * Define unsigned int cpuid_e[abcd]x(unsigned int op)
+ */
+#define cpuid_func(reg) \
+ unsigned int cpuid_##reg(unsigned int op) \
+ { \
+ unsigned int eax, ebx, ecx, edx; \
+ __cpuid(op, eax, ebx, ecx, edx); \
+ return reg; \
+ }
+cpuid_func(eax);
+cpuid_func(ebx);
+cpuid_func(ecx);
+cpuid_func(edx);
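+
+/*
+ * Editorial note: cpuid_func(eax) above expands to
+ *
+ *	unsigned int cpuid_eax(unsigned int op)
+ *	{
+ *		unsigned int eax, ebx, ecx, edx;
+ *		__cpuid(op, eax, ebx, ecx, edx);
+ *		return eax;
+ *	}
+ *
+ * so, for example, cpuid_eax(0) returns the highest supported standard
+ * cpuid leaf.
+ */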
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+/* get_cpu_info
+ *
+ * Extract CPU vendor, family, model, stepping info from /proc/cpuinfo
+ *
+ * Returns 0 on success or a negative error code
+ *
+ * TBD: Should there be a cpuid alternative for this if /proc is not mounted?
+ */
+int get_cpu_info(unsigned int cpu, struct cpupower_cpu_info *cpu_info)
+{
+ FILE *fp;
+ char value[64];
+	unsigned int proc = 0, x;
+ unsigned int unknown = 0xffffff;
+ unsigned int cpuid_level, ext_cpuid_level;
+
+ int ret = -EINVAL;
+
+ cpu_info->vendor = X86_VENDOR_UNKNOWN;
+ cpu_info->family = unknown;
+ cpu_info->model = unknown;
+ cpu_info->stepping = unknown;
+ cpu_info->caps = 0;
+
+ fp = fopen("/proc/cpuinfo", "r");
+ if (!fp)
+ return -EIO;
+
+ while (!feof(fp)) {
+ if (!fgets(value, 64, fp))
+ continue;
+		value[sizeof(value) - 1] = '\0';
+
+ if (!strncmp(value, "processor\t: ", 12))
+ sscanf(value, "processor\t: %u", &proc);
+
+ if (proc != cpu)
+ continue;
+
+ /* Get CPU vendor */
+ if (!strncmp(value, "vendor_id", 9)) {
+ for (x = 1; x < X86_VENDOR_MAX; x++) {
+ if (strstr(value, cpu_vendor_table[x]))
+ cpu_info->vendor = x;
+ }
+ /* Get CPU family, etc. */
+ } else if (!strncmp(value, "cpu family\t: ", 13)) {
+ sscanf(value, "cpu family\t: %u",
+ &cpu_info->family);
+ } else if (!strncmp(value, "model\t\t: ", 9)) {
+ sscanf(value, "model\t\t: %u",
+ &cpu_info->model);
+ } else if (!strncmp(value, "stepping\t: ", 10)) {
+ sscanf(value, "stepping\t: %u",
+ &cpu_info->stepping);
+
+ /* Exit -> all values must have been set */
+ if (cpu_info->vendor == X86_VENDOR_UNKNOWN ||
+ cpu_info->family == unknown ||
+ cpu_info->model == unknown ||
+ cpu_info->stepping == unknown) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = 0;
+ goto out;
+ }
+ }
+ ret = -ENODEV;
+out:
+ fclose(fp);
+ /* Get some useful CPU capabilities from cpuid */
+ if (cpu_info->vendor != X86_VENDOR_AMD &&
+ cpu_info->vendor != X86_VENDOR_INTEL)
+ return ret;
+
+ cpuid_level = cpuid_eax(0);
+ ext_cpuid_level = cpuid_eax(0x80000000);
+
+ /* Invariant TSC */
+ if (ext_cpuid_level >= 0x80000007 &&
+ (cpuid_edx(0x80000007) & (1 << 8)))
+ cpu_info->caps |= CPUPOWER_CAP_INV_TSC;
+
+ /* Aperf/Mperf registers support */
+ if (cpuid_level >= 6 && (cpuid_ecx(6) & 0x1))
+ cpu_info->caps |= CPUPOWER_CAP_APERF;
+
+ /* AMD Boost state enable/disable register */
+ if (cpu_info->vendor == X86_VENDOR_AMD) {
+ if (ext_cpuid_level >= 0x80000007 &&
+ (cpuid_edx(0x80000007) & (1 << 9)))
+ cpu_info->caps |= CPUPOWER_CAP_AMD_CBP;
+ }
+
+ if (cpu_info->vendor == X86_VENDOR_INTEL) {
+ if (cpuid_level >= 6 &&
+ (cpuid_eax(6) & (1 << 1)))
+ cpu_info->caps |= CPUPOWER_CAP_INTEL_IDA;
+ }
+
+ if (cpu_info->vendor == X86_VENDOR_INTEL) {
+ /* Intel's perf-bias MSR support */
+ if (cpuid_level >= 6 && (cpuid_ecx(6) & (1 << 3)))
+ cpu_info->caps |= CPUPOWER_CAP_PERF_BIAS;
+
+ /* Intel's Turbo Ratio Limit support */
+ if (cpu_info->family == 6) {
+ switch (cpu_info->model) {
+ case 0x1A: /* Core i7, Xeon 5500 series
+					 * Bloomfield, Gainestown NHM-EP
+ */
+ case 0x1E: /* Core i7 and i5 Processor
+ * Clarksfield, Lynnfield, Jasper Forest
+ */
+ case 0x1F: /* Core i7 and i5 Processor - Nehalem */
+ case 0x25: /* Westmere Client
+ * Clarkdale, Arrandale
+ */
+ case 0x2C: /* Westmere EP - Gulftown */
+				cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+				break;
+ case 0x2A: /* SNB */
+ case 0x2D: /* SNB Xeon */
+ cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+ cpu_info->caps |= CPUPOWER_CAP_IS_SNB;
+ break;
+ case 0x2E: /* Nehalem-EX Xeon - Beckton */
+ case 0x2F: /* Westmere-EX Xeon - Eagleton */
+ default:
+ break;
+ }
+ }
+ }
+
+ /* printf("ID: %u - Extid: 0x%x - Caps: 0x%llx\n",
+ cpuid_level, ext_cpuid_level, cpu_info->caps);
+ */
+ return ret;
+}
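+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * patch): query the CPU info and test a capability bit.
+ *
+ *	struct cpupower_cpu_info info;
+ *
+ *	if (get_cpu_info(0, &info) == 0 &&
+ *	    (info.caps & CPUPOWER_CAP_APERF))
+ *		printf("CPU 0 supports APERF/MPERF\n");
+ */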
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
new file mode 100644
index 00000000000..592ee362b87
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -0,0 +1,178 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Miscellaneous helpers which do not fit into, or are not worth
+ * putting into, separate headers
+ */
+
+#ifndef __CPUPOWERUTILS_HELPERS__
+#define __CPUPOWERUTILS_HELPERS__
+
+#include <libintl.h>
+#include <locale.h>
+
+#include "helpers/bitmask.h"
+
+/* Internationalization ****************************/
+#define _(String) gettext(String)
+#ifndef gettext_noop
+#define gettext_noop(String) String
+#endif
+#define N_(String) gettext_noop(String)
+/* Internationalization ****************************/
+
+extern int run_as_root;
+extern struct bitmask *cpus_chosen;
+
+/* Global verbose (-d) stuff *********************************/
+/*
+ * define DEBUG via global Makefile variable
+ * Debug output is sent to stderr, do:
+ * cpupower monitor 2>/tmp/debug
+ * to split debug output away from normal output
+*/
+#ifdef DEBUG
+extern int be_verbose;
+
+#define dprint(fmt, ...) { \
+ if (be_verbose) { \
+ fprintf(stderr, "%s: " fmt, \
+ __func__, ##__VA_ARGS__); \
+ } \
+ }
+#else
+static inline void dprint(const char *fmt, ...) { }
+#endif
+extern int be_verbose;
+/* Global verbose (-d) stuff *********************************/
+
+/* cpuid and cpuinfo helpers **************************/
+enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
+ X86_VENDOR_AMD, X86_VENDOR_MAX};
+
+#define CPUPOWER_CAP_INV_TSC 0x00000001
+#define CPUPOWER_CAP_APERF 0x00000002
+#define CPUPOWER_CAP_AMD_CBP 0x00000004
+#define CPUPOWER_CAP_PERF_BIAS 0x00000008
+#define CPUPOWER_CAP_HAS_TURBO_RATIO 0x00000010
+#define CPUPOWER_CAP_IS_SNB		0x00000020
+#define CPUPOWER_CAP_INTEL_IDA		0x00000040
+
+#define MAX_HW_PSTATES 10
+
+struct cpupower_cpu_info {
+ enum cpupower_cpu_vendor vendor;
+ unsigned int family;
+ unsigned int model;
+ unsigned int stepping;
+ /* CPU capabilities read out from cpuid */
+ unsigned long long caps;
+};
+
+/* get_cpu_info
+ *
+ * Extract CPU vendor, family, model, stepping info from /proc/cpuinfo
+ *
+ * Returns 0 on success or a negative error code.
+ * Only used on x86; on other architectures the values of the global
+ * struct below are zero/unknown.
+ */
+extern int get_cpu_info(unsigned int cpu, struct cpupower_cpu_info *cpu_info);
+extern struct cpupower_cpu_info cpupower_cpu_info;
+/* cpuid and cpuinfo helpers **************************/
+
+
+/* CPU topology/hierarchy parsing ******************/
+struct cpupower_topology {
+	/* Number of CPU cores, packages and threads per core in the system */
+ unsigned int cores;
+ unsigned int pkgs;
+ unsigned int threads; /* per core */
+
+	/* Array gets allocated with cores entries, holding per-core info */
+ struct {
+ int pkg;
+ int core;
+ int cpu;
+ } *core_info;
+};
+
+extern int get_cpu_topology(struct cpupower_topology *cpu_top);
+extern void cpu_topology_release(struct cpupower_topology cpu_top);
+/* CPU topology/hierarchy parsing ******************/
+
+/* X86 ONLY ****************************************/
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <pci/pci.h>
+
+/* Read/Write msr ****************************/
+extern int read_msr(int cpu, unsigned int idx, unsigned long long *val);
+extern int write_msr(int cpu, unsigned int idx, unsigned long long val);
+
+extern int msr_intel_set_perf_bias(unsigned int cpu, unsigned int val);
+extern int msr_intel_get_perf_bias(unsigned int cpu);
+extern unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu);
+
+/* Read/Write msr ****************************/
+
+/* PCI stuff ****************************/
+extern int amd_pci_get_num_boost_states(int *active, int *states);
+extern struct pci_dev *pci_acc_init(struct pci_access **pacc, int vendor_id,
+ int *dev_ids);
+
+/* PCI stuff ****************************/
+
+/* AMD HW pstate decoding **************************/
+
+extern int decode_pstates(unsigned int cpu, unsigned int cpu_family,
+ int boost_states, unsigned long *pstates, int *no);
+
+/* AMD HW pstate decoding **************************/
+
+extern int cpufreq_has_boost_support(unsigned int cpu, int *support,
+				     int *active, int *states);
+/*
+ * CPUID functions returning a single datum
+ */
+unsigned int cpuid_eax(unsigned int op);
+unsigned int cpuid_ebx(unsigned int op);
+unsigned int cpuid_ecx(unsigned int op);
+unsigned int cpuid_edx(unsigned int op);
+
+/* cpuid and cpuinfo helpers **************************/
+/* X86 ONLY ********************************************/
+#else
+static inline int decode_pstates(unsigned int cpu, unsigned int cpu_family,
+ int boost_states, unsigned long *pstates,
+ int *no)
+{ return -1; };
+
+static inline int read_msr(int cpu, unsigned int idx, unsigned long long *val)
+{ return -1; };
+static inline int write_msr(int cpu, unsigned int idx, unsigned long long val)
+{ return -1; };
+static inline int msr_intel_set_perf_bias(unsigned int cpu, unsigned int val)
+{ return -1; };
+static inline int msr_intel_get_perf_bias(unsigned int cpu)
+{ return -1; };
+static inline unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu)
+{ return 0; };
+
+/* Read/Write msr ****************************/
+
+static inline int cpufreq_has_boost_support(unsigned int cpu, int *support,
+					    int *active, int *states)
+{ return -1; }
+
+/* cpuid and cpuinfo helpers **************************/
+
+static inline unsigned int cpuid_eax(unsigned int op) { return 0; };
+static inline unsigned int cpuid_ebx(unsigned int op) { return 0; };
+static inline unsigned int cpuid_ecx(unsigned int op) { return 0; };
+static inline unsigned int cpuid_edx(unsigned int op) { return 0; };
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+#endif /* __CPUPOWERUTILS_HELPERS__ */
diff --git a/tools/power/cpupower/utils/helpers/misc.c b/tools/power/cpupower/utils/helpers/misc.c
new file mode 100644
index 00000000000..1609243f5c6
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/misc.c
@@ -0,0 +1,27 @@
+#if defined(__i386__) || defined(__x86_64__)
+
+#include "helpers/helpers.h"
+
+int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
+ int *states)
+{
+ struct cpupower_cpu_info cpu_info;
+ int ret;
+
+ *support = *active = *states = 0;
+
+ ret = get_cpu_info(0, &cpu_info);
+ if (ret)
+ return ret;
+
+	if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_CBP) {
+		*support = 1;
+		ret = amd_pci_get_num_boost_states(active, states);
+		if (ret <= 0)
+			return ret;
+ } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_INTEL_IDA)
+ *support = *active = 1;
+ return 0;
+}
+#endif /* #if defined(__i386__) || defined(__x86_64__) */
diff --git a/tools/power/cpupower/utils/helpers/msr.c b/tools/power/cpupower/utils/helpers/msr.c
new file mode 100644
index 00000000000..31a4b24a8bc
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/msr.c
@@ -0,0 +1,115 @@
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdint.h>
+
+#include "helpers/helpers.h"
+
+/* Intel specific MSRs */
+#define MSR_IA32_PERF_STATUS 0x198
+#define MSR_IA32_MISC_ENABLES 0x1a0
+#define MSR_IA32_ENERGY_PERF_BIAS 0x1b0
+#define MSR_NEHALEM_TURBO_RATIO_LIMIT 0x1ad
+
+/*
+ * read_msr
+ *
+ * Will return 0 on success and -1 on failure.
+ * Possible errno values could be:
+ * EFAULT -If the read/write did not fully complete
+ * EIO -If the CPU does not support MSRs
+ * ENXIO -If the CPU does not exist
+ */
+
+int read_msr(int cpu, unsigned int idx, unsigned long long *val)
+{
+ int fd;
+ char msr_file_name[64];
+
+ sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
+ fd = open(msr_file_name, O_RDONLY);
+ if (fd < 0)
+ return -1;
+ if (lseek(fd, idx, SEEK_CUR) == -1)
+ goto err;
+ if (read(fd, val, sizeof *val) != sizeof *val)
+ goto err;
+ close(fd);
+ return 0;
+ err:
+ close(fd);
+ return -1;
+}
+
+/*
+ * write_msr
+ *
+ * Will return 0 on success and -1 on failure.
+ * Possible errno values could be:
+ * EFAULT -If the read/write did not fully complete
+ * EIO -If the CPU does not support MSRs
+ * ENXIO -If the CPU does not exist
+ */
+int write_msr(int cpu, unsigned int idx, unsigned long long val)
+{
+ int fd;
+ char msr_file_name[64];
+
+ sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu);
+ fd = open(msr_file_name, O_WRONLY);
+ if (fd < 0)
+ return -1;
+ if (lseek(fd, idx, SEEK_CUR) == -1)
+ goto err;
+ if (write(fd, &val, sizeof val) != sizeof val)
+ goto err;
+ close(fd);
+ return 0;
+ err:
+ close(fd);
+ return -1;
+}
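+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * patch): read an MSR of cpu 0 via /dev/cpu/0/msr; this needs root and
+ * the msr kernel module loaded.
+ *
+ *	unsigned long long val;
+ *
+ *	if (read_msr(0, MSR_IA32_ENERGY_PERF_BIAS, &val) == 0)
+ *		printf("energy perf bias: %llu\n", val);
+ */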
+
+int msr_intel_get_perf_bias(unsigned int cpu)
+{
+ unsigned long long val;
+ int ret;
+
+ if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_PERF_BIAS))
+ return -1;
+
+ ret = read_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &val);
+ if (ret)
+ return ret;
+ return val;
+}
+
+int msr_intel_set_perf_bias(unsigned int cpu, unsigned int val)
+{
+ int ret;
+
+ if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_PERF_BIAS))
+ return -1;
+
+ ret = write_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, val);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu)
+{
+ unsigned long long val;
+ int ret;
+
+ if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_HAS_TURBO_RATIO))
+ return -1;
+
+ ret = read_msr(cpu, MSR_NEHALEM_TURBO_RATIO_LIMIT, &val);
+ if (ret)
+ return ret;
+ return val;
+}
+#endif
diff --git a/tools/power/cpupower/utils/helpers/pci.c b/tools/power/cpupower/utils/helpers/pci.c
new file mode 100644
index 00000000000..cd2eb6fe41c
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/pci.c
@@ -0,0 +1,44 @@
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <helpers/helpers.h>
+
+/*
+ * pci_acc_init
+ *
+ * PCI access helper function based on libpci
+ *
+ * **pacc : if a valid pci_dev is returned,
+ *          *pacc must be passed to pci_cleanup() to free it
+ *
+ * vendor_id : the pci vendor id matching the pci device to access
+ * dev_ids : device ids matching the pci device to access
+ *
+ * Returns :
+ * struct pci_dev which can be used with pci_{read,write}_* functions
+ * to access the PCI config space of matching pci devices
+ */
+struct pci_dev *pci_acc_init(struct pci_access **pacc, int vendor_id,
+ int *dev_ids)
+{
+ struct pci_filter filter_nb_link = { -1, -1, -1, -1, vendor_id, 0};
+ struct pci_dev *device;
+ unsigned int i;
+
+ *pacc = pci_alloc();
+ if (*pacc == NULL)
+ return NULL;
+
+ pci_init(*pacc);
+ pci_scan_bus(*pacc);
+
+ for (i = 0; dev_ids[i] != 0; i++) {
+ filter_nb_link.device = dev_ids[i];
+ for (device = (*pacc)->devices; device; device = device->next) {
+ if (pci_filter_match(&filter_nb_link, device))
+ return device;
+ }
+ }
+ pci_cleanup(*pacc);
+ return NULL;
+}
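+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * patch), mirroring how amd_fam14h_register() uses this helper:
+ *
+ *	struct pci_access *pacc;
+ *	int dev_ids[2] = { 0x1716, 0 };
+ *	struct pci_dev *dev = pci_acc_init(&pacc, 0x1022, dev_ids);
+ *
+ *	if (dev) {
+ *		uint32_t val = pci_read_long(dev, 0xe0);
+ *		...
+ *		pci_cleanup(pacc);
+ *	}
+ */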
+#endif /* defined(__i386__) || defined(__x86_64__) */
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c
new file mode 100644
index 00000000000..55e2466674c
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/sysfs.c
@@ -0,0 +1,358 @@
+/*
+ * (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
+ * (C) 2011 Thomas Renninger <trenn@novell.com> Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "helpers/sysfs.h"
+
+unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen)
+{
+ int fd;
+ ssize_t numread;
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ return 0;
+
+ numread = read(fd, buf, buflen - 1);
+ if (numread < 1) {
+ close(fd);
+ return 0;
+ }
+
+ buf[numread] = '\0';
+ close(fd);
+
+ return (unsigned int) numread;
+}
+
+static unsigned int sysfs_write_file(const char *path,
+ const char *value, size_t len)
+{
+ int fd;
+ ssize_t numwrite;
+
+ fd = open(path, O_WRONLY);
+ if (fd == -1)
+ return 0;
+
+ numwrite = write(fd, value, len);
+ if (numwrite < 1) {
+ close(fd);
+ return 0;
+ }
+ close(fd);
+ return (unsigned int) numwrite;
+}
+
+/* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
+
+/*
+ * helper function to read file from /sys into given buffer
+ * fname is a relative path under "cpuX/cpuidle/stateX/" dir
+ * cstates start at 0; C0 is not counted as a cstate.
+ * This means if you want C1 info, pass 0 as the idlestate param.
+ */
+unsigned int sysfs_idlestate_read_file(unsigned int cpu, unsigned int idlestate,
+ const char *fname, char *buf, size_t buflen)
+{
+ char path[SYSFS_PATH_MAX];
+ int fd;
+ ssize_t numread;
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s",
+ cpu, idlestate, fname);
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ return 0;
+
+ numread = read(fd, buf, buflen - 1);
+ if (numread < 1) {
+ close(fd);
+ return 0;
+ }
+
+ buf[numread] = '\0';
+ close(fd);
+
+ return (unsigned int) numread;
+}
+
+/* read access to files which contain one numeric value */
+
+enum idlestate_value {
+ IDLESTATE_USAGE,
+ IDLESTATE_POWER,
+ IDLESTATE_LATENCY,
+ IDLESTATE_TIME,
+ MAX_IDLESTATE_VALUE_FILES
+};
+
+static const char *idlestate_value_files[MAX_IDLESTATE_VALUE_FILES] = {
+ [IDLESTATE_USAGE] = "usage",
+ [IDLESTATE_POWER] = "power",
+ [IDLESTATE_LATENCY] = "latency",
+ [IDLESTATE_TIME] = "time",
+};
+
+static unsigned long long sysfs_idlestate_get_one_value(unsigned int cpu,
+ unsigned int idlestate,
+ enum idlestate_value which)
+{
+ unsigned long long value;
+ unsigned int len;
+ char linebuf[MAX_LINE_LEN];
+ char *endp;
+
+ if (which >= MAX_IDLESTATE_VALUE_FILES)
+ return 0;
+
+ len = sysfs_idlestate_read_file(cpu, idlestate,
+ idlestate_value_files[which],
+ linebuf, sizeof(linebuf));
+ if (len == 0)
+ return 0;
+
+ value = strtoull(linebuf, &endp, 0);
+
+ if (endp == linebuf || errno == ERANGE)
+ return 0;
+
+ return value;
+}
+
+/* read access to files which contain one string */
+
+enum idlestate_string {
+ IDLESTATE_DESC,
+ IDLESTATE_NAME,
+ MAX_IDLESTATE_STRING_FILES
+};
+
+static const char *idlestate_string_files[MAX_IDLESTATE_STRING_FILES] = {
+ [IDLESTATE_DESC] = "desc",
+ [IDLESTATE_NAME] = "name",
+};
+
+
+static char *sysfs_idlestate_get_one_string(unsigned int cpu,
+ unsigned int idlestate,
+ enum idlestate_string which)
+{
+ char linebuf[MAX_LINE_LEN];
+ char *result;
+ unsigned int len;
+
+ if (which >= MAX_IDLESTATE_STRING_FILES)
+ return NULL;
+
+ len = sysfs_idlestate_read_file(cpu, idlestate,
+ idlestate_string_files[which],
+ linebuf, sizeof(linebuf));
+ if (len == 0)
+ return NULL;
+
+ result = strdup(linebuf);
+ if (result == NULL)
+ return NULL;
+
+ if (result[strlen(result) - 1] == '\n')
+ result[strlen(result) - 1] = '\0';
+
+ return result;
+}
+
+unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
+ unsigned int idlestate)
+{
+ return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_LATENCY);
+}
+
+unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
+ unsigned int idlestate)
+{
+ return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_USAGE);
+}
+
+unsigned long long sysfs_get_idlestate_time(unsigned int cpu,
+ unsigned int idlestate)
+{
+ return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_TIME);
+}
+
+char *sysfs_get_idlestate_name(unsigned int cpu, unsigned int idlestate)
+{
+ return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_NAME);
+}
+
+char *sysfs_get_idlestate_desc(unsigned int cpu, unsigned int idlestate)
+{
+ return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_DESC);
+}
+
+/*
+ * Returns number of supported C-states of CPU core cpu
+ * Negative in error case
+ * Zero if cpuidle does not export any C-states
+ */
+int sysfs_get_idlestate_count(unsigned int cpu)
+{
+ char file[SYSFS_PATH_MAX];
+ struct stat statbuf;
+ int idlestates = 1;
+
+
+ snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
+ if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
+ return -ENODEV;
+
+ snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
+ if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
+ return 0;
+
+ while (stat(file, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) {
+ snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU
+ "cpu%u/cpuidle/state%d", cpu, idlestates);
+ idlestates++;
+ }
+ idlestates--;
+ return idlestates;
+}
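+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * patch): list the C-states cpuidle exports for cpu 0.
+ *
+ *	int i, count = sysfs_get_idlestate_count(0);
+ *
+ *	for (i = 0; i < count; i++) {
+ *		char *name = sysfs_get_idlestate_name(0, i);
+ *		if (name) {
+ *			printf("state%d: %s\n", i, name);
+ *			free(name);
+ *		}
+ *	}
+ */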
+
+/* CPUidle general /sys/devices/system/cpu/cpuidle/ sysfs access ********/
+
+/*
+ * helper function to read file from /sys into given buffer
+ * fname is a relative path under "cpu/cpuidle/" dir
+ */
+static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
+ size_t buflen)
+{
+ char path[SYSFS_PATH_MAX];
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
+
+ return sysfs_read_file(path, buf, buflen);
+}
+
+
+
+/* read access to files which contain one string */
+
+enum cpuidle_string {
+ CPUIDLE_GOVERNOR,
+ CPUIDLE_GOVERNOR_RO,
+ CPUIDLE_DRIVER,
+ MAX_CPUIDLE_STRING_FILES
+};
+
+static const char *cpuidle_string_files[MAX_CPUIDLE_STRING_FILES] = {
+ [CPUIDLE_GOVERNOR] = "current_governor",
+ [CPUIDLE_GOVERNOR_RO] = "current_governor_ro",
+ [CPUIDLE_DRIVER] = "current_driver",
+};
+
+
+static char *sysfs_cpuidle_get_one_string(enum cpuidle_string which)
+{
+ char linebuf[MAX_LINE_LEN];
+ char *result;
+ unsigned int len;
+
+ if (which >= MAX_CPUIDLE_STRING_FILES)
+ return NULL;
+
+ len = sysfs_cpuidle_read_file(cpuidle_string_files[which],
+ linebuf, sizeof(linebuf));
+ if (len == 0)
+ return NULL;
+
+ result = strdup(linebuf);
+ if (result == NULL)
+ return NULL;
+
+ if (result[strlen(result) - 1] == '\n')
+ result[strlen(result) - 1] = '\0';
+
+ return result;
+}
+
+char *sysfs_get_cpuidle_governor(void)
+{
+ char *tmp = sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR_RO);
+ if (!tmp)
+ return sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR);
+ else
+ return tmp;
+}
+
+char *sysfs_get_cpuidle_driver(void)
+{
+ return sysfs_cpuidle_get_one_string(CPUIDLE_DRIVER);
+}
+/* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
+
+/*
+ * Get sched_mc or sched_smt settings
+ * Pass "mc" or "smt" as argument
+ *
+ * Returns negative value on failure
+ */
+int sysfs_get_sched(const char *smt_mc)
+{
+ unsigned long value;
+ char linebuf[MAX_LINE_LEN];
+ char *endp;
+ char path[SYSFS_PATH_MAX];
+
+ if (strcmp("mc", smt_mc) && strcmp("smt", smt_mc))
+ return -EINVAL;
+
+ snprintf(path, sizeof(path),
+ PATH_TO_CPU "sched_%s_power_savings", smt_mc);
+ if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
+ return -1;
+ value = strtoul(linebuf, &endp, 0);
+ if (endp == linebuf || errno == ERANGE)
+ return -1;
+ return value;
+}
+
+/*
+ * Set sched_mc or sched_smt settings
+ * Pass "mc" or "smt" as argument
+ *
+ * Returns negative value on failure
+ */
+int sysfs_set_sched(const char *smt_mc, int val)
+{
+ char linebuf[MAX_LINE_LEN];
+ char path[SYSFS_PATH_MAX];
+ struct stat statbuf;
+
+ if (strcmp("mc", smt_mc) && strcmp("smt", smt_mc))
+ return -EINVAL;
+
+ snprintf(path, sizeof(path),
+ PATH_TO_CPU "sched_%s_power_savings", smt_mc);
+ sprintf(linebuf, "%d", val);
+
+ if (stat(path, &statbuf) != 0)
+ return -ENODEV;
+
+ if (sysfs_write_file(path, linebuf, MAX_LINE_LEN) == 0)
+ return -1;
+ return 0;
+}
diff --git a/tools/power/cpupower/utils/helpers/sysfs.h b/tools/power/cpupower/utils/helpers/sysfs.h
new file mode 100644
index 00000000000..f9373e09063
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/sysfs.h
@@ -0,0 +1,28 @@
+#ifndef __CPUPOWER_HELPERS_SYSFS_H__
+#define __CPUPOWER_HELPERS_SYSFS_H__
+
+#define PATH_TO_CPU "/sys/devices/system/cpu/"
+#define MAX_LINE_LEN 255
+#define SYSFS_PATH_MAX 255
+
+extern unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen);
+
+extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu,
+ unsigned int idlestate);
+extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu,
+ unsigned int idlestate);
+extern unsigned long long sysfs_get_idlestate_time(unsigned int cpu,
+ unsigned int idlestate);
+extern char *sysfs_get_idlestate_name(unsigned int cpu,
+ unsigned int idlestate);
+extern char *sysfs_get_idlestate_desc(unsigned int cpu,
+ unsigned int idlestate);
+extern int sysfs_get_idlestate_count(unsigned int cpu);
+
+extern char *sysfs_get_cpuidle_governor(void);
+extern char *sysfs_get_cpuidle_driver(void);
+
+extern int sysfs_get_sched(const char *smt_mc);
+extern int sysfs_set_sched(const char *smt_mc, int val);
+
+#endif /* __CPUPOWER_HELPERS_SYSFS_H__ */
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c
new file mode 100644
index 00000000000..385ee5c7570
--- /dev/null
+++ b/tools/power/cpupower/utils/helpers/topology.c
@@ -0,0 +1,108 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * ToDo: Needs to be done more properly for AMD/Intel specifics
+ */
+
+/* Helper struct for qsort, must be in sync with cpupower_topology.core_info */
+/* Be careful: the sort needs unsigned values so that offlined cores end up
+   at the end, but double-check for -1 for offlined cpus in other places */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <helpers/helpers.h>
+#include <helpers/sysfs.h>
+
+/* returns -1 on failure, 0 on success */
+int sysfs_topology_read_file(unsigned int cpu, const char *fname)
+{
+ unsigned long value;
+ char linebuf[MAX_LINE_LEN];
+ char *endp;
+ char path[SYSFS_PATH_MAX];
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
+ cpu, fname);
+ if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
+ return -1;
+ value = strtoul(linebuf, &endp, 0);
+ if (endp == linebuf || errno == ERANGE)
+ return -1;
+ return value;
+}
+
+struct cpuid_core_info {
+ unsigned int pkg;
+ unsigned int thread;
+ unsigned int cpu;
+};
+
+static int __compare(const void *t1, const void *t2)
+{
+ struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1;
+ struct cpuid_core_info *top2 = (struct cpuid_core_info *)t2;
+ if (top1->pkg < top2->pkg)
+ return -1;
+ else if (top1->pkg > top2->pkg)
+ return 1;
+ else if (top1->thread < top2->thread)
+ return -1;
+ else if (top1->thread > top2->thread)
+ return 1;
+ else if (top1->cpu < top2->cpu)
+ return -1;
+ else if (top1->cpu > top2->cpu)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * Returns the number of cpus, negative on error; cpu_top must be
+ * passed to cpu_topology_release() to free resources
+ *
+ * The array is sorted by ->pkg, ->core, then ->cpu
+ */
+int get_cpu_topology(struct cpupower_topology *cpu_top)
+{
+ int cpu, cpus = sysconf(_SC_NPROCESSORS_CONF);
+
+	cpu_top->core_info = malloc(sizeof(*cpu_top->core_info) * cpus);
+ if (cpu_top->core_info == NULL)
+ return -ENOMEM;
+ cpu_top->pkgs = cpu_top->cores = 0;
+ for (cpu = 0; cpu < cpus; cpu++) {
+ cpu_top->core_info[cpu].pkg =
+ sysfs_topology_read_file(cpu, "physical_package_id");
+ if ((int)cpu_top->core_info[cpu].pkg != -1 &&
+ cpu_top->core_info[cpu].pkg > cpu_top->pkgs)
+ cpu_top->pkgs = cpu_top->core_info[cpu].pkg;
+ cpu_top->core_info[cpu].core =
+ sysfs_topology_read_file(cpu, "core_id");
+ cpu_top->core_info[cpu].cpu = cpu;
+ }
+ cpu_top->pkgs++;
+
+ qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info),
+ __compare);
+
+	/* Intel's core IDs are not necessarily consecutive; there may be
+	 * a core_id of 3 but none of 2. Assume there is always a core 0.
+	 * Count cores by counting core_id == 0 entries per package:
+	for (cpu = 0; cpu_top->core_info[cpu].pkg == 0 && cpu < cpus; cpu++) {
+		if (cpu_top->core_info[cpu].core == 0)
+			cpu_top->cores++;
+	}
+	*/
+ return cpus;
+}
+
+void cpu_topology_release(struct cpupower_topology cpu_top)
+{
+ free(cpu_top.core_info);
+}
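+
+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * patch): walk the sorted core_info array.
+ *
+ *	struct cpupower_topology top;
+ *	int i, cpus = get_cpu_topology(&top);
+ *
+ *	for (i = 0; i < cpus; i++)
+ *		printf("pkg %d core %d cpu %d\n", top.core_info[i].pkg,
+ *		       top.core_info[i].core, top.core_info[i].cpu);
+ *	cpu_topology_release(top);
+ */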
diff --git a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
new file mode 100644
index 00000000000..202e555988b
--- /dev/null
+++ b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
@@ -0,0 +1,338 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * PCI initialization based on example code from:
+ * Andreas Herrmann <andreas.herrmann3@amd.com>
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <time.h>
+#include <string.h>
+
+#include <pci/pci.h>
+
+#include "idle_monitor/cpupower-monitor.h"
+#include "helpers/helpers.h"
+
+/******** PCI parts could go into own file and get shared ***************/
+
+#define PCI_NON_PC0_OFFSET 0xb0
+#define PCI_PC1_OFFSET 0xb4
+#define PCI_PC6_OFFSET 0xb8
+
+#define PCI_MONITOR_ENABLE_REG 0xe0
+
+#define PCI_NON_PC0_ENABLE_BIT 0
+#define PCI_PC1_ENABLE_BIT 1
+#define PCI_PC6_ENABLE_BIT 2
+
+#define PCI_NBP1_STAT_OFFSET 0x98
+#define PCI_NBP1_ACTIVE_BIT 2
+#define PCI_NBP1_ENTERED_BIT 1
+
+#define PCI_NBP1_CAP_OFFSET 0x90
+#define PCI_NBP1_CAPABLE_BIT 31
+
+#define OVERFLOW_MS		343597 /* 32 bit register filled at 12.5 MHz
+					  (1 tick per 80 ns) */
+
+enum amd_fam14h_states {NON_PC0 = 0, PC1, PC6, NBP1,
+ AMD_FAM14H_STATE_NUM};
+
+static int fam14h_get_count_percent(unsigned int self_id, double *percent,
+ unsigned int cpu);
+static int fam14h_nbp1_count(unsigned int id, unsigned long long *count,
+ unsigned int cpu);
+
+static cstate_t amd_fam14h_cstates[AMD_FAM14H_STATE_NUM] = {
+ {
+ .name = "!PC0",
+ .desc = N_("Package in sleep state (PC1 or deeper)"),
+ .id = NON_PC0,
+ .range = RANGE_PACKAGE,
+ .get_count_percent = fam14h_get_count_percent,
+ },
+ {
+ .name = "PC1",
+ .desc = N_("Processor Package C1"),
+ .id = PC1,
+ .range = RANGE_PACKAGE,
+ .get_count_percent = fam14h_get_count_percent,
+ },
+ {
+ .name = "PC6",
+ .desc = N_("Processor Package C6"),
+ .id = PC6,
+ .range = RANGE_PACKAGE,
+ .get_count_percent = fam14h_get_count_percent,
+ },
+ {
+ .name = "NBP1",
+ .desc = N_("North Bridge P1 boolean counter (returns 0 or 1)"),
+ .id = NBP1,
+ .range = RANGE_PACKAGE,
+ .get_count = fam14h_nbp1_count,
+ },
+};
+
+static struct pci_access *pci_acc;
+static int pci_vendor_id = 0x1022;
+static int pci_dev_ids[2] = {0x1716, 0};
+static struct pci_dev *amd_fam14h_pci_dev;
+
+static int nbp1_entered;
+
+struct timespec start_time;
+static unsigned long long timediff;
+
+#ifdef DEBUG
+struct timespec dbg_time;
+long dbg_timediff;
+#endif
+
+static unsigned long long *previous_count[AMD_FAM14H_STATE_NUM];
+static unsigned long long *current_count[AMD_FAM14H_STATE_NUM];
+
+static int amd_fam14h_get_pci_info(struct cstate *state,
+ unsigned int *pci_offset,
+ unsigned int *enable_bit,
+ unsigned int cpu)
+{
+ switch (state->id) {
+ case NON_PC0:
+ *enable_bit = PCI_NON_PC0_ENABLE_BIT;
+ *pci_offset = PCI_NON_PC0_OFFSET;
+ break;
+ case PC1:
+ *enable_bit = PCI_PC1_ENABLE_BIT;
+ *pci_offset = PCI_PC1_OFFSET;
+ break;
+ case PC6:
+ *enable_bit = PCI_PC6_ENABLE_BIT;
+ *pci_offset = PCI_PC6_OFFSET;
+ break;
+ case NBP1:
+ *enable_bit = PCI_NBP1_ENTERED_BIT;
+ *pci_offset = PCI_NBP1_STAT_OFFSET;
+ break;
+ default:
+ return -1;
+ };
+ return 0;
+}
+
+static int amd_fam14h_init(cstate_t *state, unsigned int cpu)
+{
+ int enable_bit, pci_offset, ret;
+ uint32_t val;
+
+ ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu);
+ if (ret)
+ return ret;
+
+	/* NBP1 needs extra treatment -> write 1 to D18F6x98 bit 1 to init */
+ if (state->id == NBP1) {
+ val = pci_read_long(amd_fam14h_pci_dev, pci_offset);
+ val |= 1 << enable_bit;
+		pci_write_long(amd_fam14h_pci_dev, pci_offset, val);
+ return ret;
+ }
+
+ /* Enable monitor */
+ val = pci_read_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG);
+ dprint("Init %s: read at offset: 0x%x val: %u\n", state->name,
+ PCI_MONITOR_ENABLE_REG, (unsigned int) val);
+ val |= 1 << enable_bit;
+ pci_write_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG, val);
+
+ dprint("Init %s: offset: 0x%x enable_bit: %d - val: %u (%u)\n",
+ state->name, PCI_MONITOR_ENABLE_REG, enable_bit,
+ (unsigned int) val, cpu);
+
+ /* Set counter to zero */
+ pci_write_long(amd_fam14h_pci_dev, pci_offset, 0);
+ previous_count[state->id][cpu] = 0;
+
+ return 0;
+}
+
+static int amd_fam14h_disable(cstate_t *state, unsigned int cpu)
+{
+ int enable_bit, pci_offset, ret;
+ uint32_t val;
+
+ ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu);
+ if (ret)
+ return ret;
+
+ val = pci_read_long(amd_fam14h_pci_dev, pci_offset);
+ dprint("%s: offset: 0x%x %u\n", state->name, pci_offset, val);
+ if (state->id == NBP1) {
+		/* was the bit indicating that NBP1 was entered set? */
+ nbp1_entered = (val & (1 << PCI_NBP1_ACTIVE_BIT)) |
+ (val & (1 << PCI_NBP1_ENTERED_BIT));
+
+ dprint("NBP1 was %sentered - 0x%x - enable_bit: "
+ "%d - pci_offset: 0x%x\n",
+ nbp1_entered ? "" : "not ",
+ val, enable_bit, pci_offset);
+ return ret;
+ }
+ current_count[state->id][cpu] = val;
+
+ dprint("%s: Current - %llu (%u)\n", state->name,
+ current_count[state->id][cpu], cpu);
+ dprint("%s: Previous - %llu (%u)\n", state->name,
+ previous_count[state->id][cpu], cpu);
+
+ val = pci_read_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG);
+ val &= ~(1 << enable_bit);
+ pci_write_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG, val);
+
+ return 0;
+}
+
+static int fam14h_nbp1_count(unsigned int id, unsigned long long *count,
+ unsigned int cpu)
+{
+ if (id == NBP1) {
+ if (nbp1_entered)
+ *count = 1;
+ else
+ *count = 0;
+ return 0;
+ }
+ return -1;
+}
+static int fam14h_get_count_percent(unsigned int id, double *percent,
+ unsigned int cpu)
+{
+ unsigned long diff;
+
+ if (id >= AMD_FAM14H_STATE_NUM)
+ return -1;
+	/* residency count is in 80 ns units -> divide by 12.5 for us residency */
+ diff = current_count[id][cpu] - previous_count[id][cpu];
+
+ if (timediff == 0)
+ *percent = 0.0;
+ else
+ *percent = 100.0 * diff / timediff / 12.5;
+
+ dprint("Timediff: %llu - res~: %lu us - percent: %.2f %%\n",
+ timediff, diff * 10 / 125, *percent);
+
+ return 0;
+}
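+
+/*
+ * Worked example (editorial note, not part of the original patch): the
+ * counters tick once per 80 ns, i.e. 12.5 ticks per microsecond.  If a
+ * counter advanced by diff = 6250000 ticks over timediff = 1000000 us,
+ * the residency is 6250000 / 12.5 = 500000 us, so
+ * 100.0 * 6250000 / 1000000 / 12.5 = 50 percent.
+ */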
+
+static int amd_fam14h_start(void)
+{
+ int num, cpu;
+ clock_gettime(CLOCK_REALTIME, &start_time);
+ for (num = 0; num < AMD_FAM14H_STATE_NUM; num++) {
+ for (cpu = 0; cpu < cpu_count; cpu++)
+ amd_fam14h_init(&amd_fam14h_cstates[num], cpu);
+ }
+#ifdef DEBUG
+ clock_gettime(CLOCK_REALTIME, &dbg_time);
+ dbg_timediff = timespec_diff_us(start_time, dbg_time);
+ dprint("Enabling counters took: %lu us\n",
+ dbg_timediff);
+#endif
+ return 0;
+}
+
+static int amd_fam14h_stop(void)
+{
+ int num, cpu;
+ struct timespec end_time;
+
+ clock_gettime(CLOCK_REALTIME, &end_time);
+
+ for (num = 0; num < AMD_FAM14H_STATE_NUM; num++) {
+ for (cpu = 0; cpu < cpu_count; cpu++)
+ amd_fam14h_disable(&amd_fam14h_cstates[num], cpu);
+ }
+#ifdef DEBUG
+ clock_gettime(CLOCK_REALTIME, &dbg_time);
+ dbg_timediff = timespec_diff_us(end_time, dbg_time);
+ dprint("Disabling counters took: %lu ns\n", dbg_timediff);
+#endif
+ timediff = timespec_diff_us(start_time, end_time);
+ if (timediff / 1000 > OVERFLOW_MS)
+ print_overflow_err((unsigned int)timediff / 1000000,
+ OVERFLOW_MS / 1000);
+
+ return 0;
+}
+
+static int is_nbp1_capable(void)
+{
+ uint32_t val;
+ val = pci_read_long(amd_fam14h_pci_dev, PCI_NBP1_CAP_OFFSET);
+	return val & (1 << PCI_NBP1_CAPABLE_BIT);
+}
+
+struct cpuidle_monitor *amd_fam14h_register(void)
+{
+ int num;
+
+ if (cpupower_cpu_info.vendor != X86_VENDOR_AMD)
+ return NULL;
+
+ if (cpupower_cpu_info.family == 0x14) {
+ if (cpu_count <= 0 || cpu_count > 2) {
+ fprintf(stderr, "AMD fam14h: Invalid cpu count: %d\n",
+ cpu_count);
+ return NULL;
+ }
+ } else
+ return NULL;
+
+	/* We do not allocate for the NBP1 machine-wide counter */
+ for (num = 0; num < AMD_FAM14H_STATE_NUM - 1; num++) {
+ previous_count[num] = calloc(cpu_count,
+ sizeof(unsigned long long));
+ current_count[num] = calloc(cpu_count,
+ sizeof(unsigned long long));
+ }
+
+ amd_fam14h_pci_dev = pci_acc_init(&pci_acc, pci_vendor_id, pci_dev_ids);
+ if (amd_fam14h_pci_dev == NULL || pci_acc == NULL)
+ return NULL;
+
+ if (!is_nbp1_capable())
+ amd_fam14h_monitor.hw_states_num = AMD_FAM14H_STATE_NUM - 1;
+
+ amd_fam14h_monitor.name_len = strlen(amd_fam14h_monitor.name);
+ return &amd_fam14h_monitor;
+}
+
+static void amd_fam14h_unregister(void)
+{
+ int num;
+ for (num = 0; num < AMD_FAM14H_STATE_NUM - 1; num++) {
+ free(previous_count[num]);
+ free(current_count[num]);
+ }
+ pci_cleanup(pci_acc);
+}
+
+struct cpuidle_monitor amd_fam14h_monitor = {
+ .name = "Ontario",
+ .hw_states = amd_fam14h_cstates,
+ .hw_states_num = AMD_FAM14H_STATE_NUM,
+ .start = amd_fam14h_start,
+ .stop = amd_fam14h_stop,
+ .do_register = amd_fam14h_register,
+ .unregister = amd_fam14h_unregister,
+ .needs_root = 1,
+ .overflow_s = OVERFLOW_MS / 1000,
+};
+#endif /* #if defined(__i386__) || defined(__x86_64__) */
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
new file mode 100644
index 00000000000..d048b96a615
--- /dev/null
+++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
@@ -0,0 +1,196 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+
+#include "helpers/sysfs.h"
+#include "helpers/helpers.h"
+#include "idle_monitor/cpupower-monitor.h"
+
+#define CPUIDLE_STATES_MAX 10
+static cstate_t cpuidle_cstates[CPUIDLE_STATES_MAX];
+struct cpuidle_monitor cpuidle_sysfs_monitor;
+
+static unsigned long long **previous_count;
+static unsigned long long **current_count;
+struct timespec start_time;
+static unsigned long long timediff;
+
+static int cpuidle_get_count_percent(unsigned int id, double *percent,
+ unsigned int cpu)
+{
+ unsigned long long statediff = current_count[cpu][id]
+ - previous_count[cpu][id];
+
+ if (timediff == 0)
+ *percent = 0.0;
+ else
+ *percent = ((100.0 * statediff) / timediff);
+
+ dprint("%s: - timediff: %llu - statediff: %llu - percent: %f (%u)\n",
+ cpuidle_cstates[id].name, timediff, statediff, *percent, cpu);
+
+ return 0;
+}
+
+static int cpuidle_start(void)
+{
+ int cpu, state;
+ clock_gettime(CLOCK_REALTIME, &start_time);
+ for (cpu = 0; cpu < cpu_count; cpu++) {
+ for (state = 0; state < cpuidle_sysfs_monitor.hw_states_num;
+ state++) {
+ previous_count[cpu][state] =
+ sysfs_get_idlestate_time(cpu, state);
+ dprint("CPU %d - State: %d - Val: %llu\n",
+ cpu, state, previous_count[cpu][state]);
+ }
+	}
+ return 0;
+}
+
+static int cpuidle_stop(void)
+{
+ int cpu, state;
+ struct timespec end_time;
+ clock_gettime(CLOCK_REALTIME, &end_time);
+ timediff = timespec_diff_us(start_time, end_time);
+
+ for (cpu = 0; cpu < cpu_count; cpu++) {
+ for (state = 0; state < cpuidle_sysfs_monitor.hw_states_num;
+ state++) {
+ current_count[cpu][state] =
+ sysfs_get_idlestate_time(cpu, state);
+ dprint("CPU %d - State: %d - Val: %llu\n",
+			    cpu, state, current_count[cpu][state]);
+ }
+	}
+ return 0;
+}
+
+void fix_up_intel_idle_driver_name(char *tmp, int num)
+{
+ /* fix up cpuidle name for intel idle driver */
+ if (!strncmp(tmp, "NHM-", 4)) {
+ switch (num) {
+ case 1:
+ strcpy(tmp, "C1");
+ break;
+ case 2:
+ strcpy(tmp, "C3");
+ break;
+ case 3:
+ strcpy(tmp, "C6");
+ break;
+ }
+ } else if (!strncmp(tmp, "SNB-", 4)) {
+ switch (num) {
+ case 1:
+ strcpy(tmp, "C1");
+ break;
+ case 2:
+ strcpy(tmp, "C3");
+ break;
+ case 3:
+ strcpy(tmp, "C6");
+ break;
+ case 4:
+ strcpy(tmp, "C7");
+ break;
+ }
+ } else if (!strncmp(tmp, "ATM-", 4)) {
+ switch (num) {
+ case 1:
+ strcpy(tmp, "C1");
+ break;
+ case 2:
+ strcpy(tmp, "C2");
+ break;
+ case 3:
+ strcpy(tmp, "C4");
+ break;
+ case 4:
+ strcpy(tmp, "C6");
+ break;
+ }
+ }
+}
+
+static struct cpuidle_monitor *cpuidle_register(void)
+{
+ int num;
+ char *tmp;
+
+ /* Assume idle state count is the same for all CPUs */
+ cpuidle_sysfs_monitor.hw_states_num = sysfs_get_idlestate_count(0);
+
+ if (cpuidle_sysfs_monitor.hw_states_num == 0)
+ return NULL;
+
+ for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
+ tmp = sysfs_get_idlestate_name(0, num);
+ if (tmp == NULL)
+ continue;
+
+ fix_up_intel_idle_driver_name(tmp, num);
+ strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
+ free(tmp);
+
+ tmp = sysfs_get_idlestate_desc(0, num);
+ if (tmp == NULL)
+ continue;
+ strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
+ free(tmp);
+
+ cpuidle_cstates[num].range = RANGE_THREAD;
+ cpuidle_cstates[num].id = num;
+ cpuidle_cstates[num].get_count_percent =
+ cpuidle_get_count_percent;
+	}
+
+ /* Free this at program termination */
+ previous_count = malloc(sizeof(long long *) * cpu_count);
+ current_count = malloc(sizeof(long long *) * cpu_count);
+ for (num = 0; num < cpu_count; num++) {
+ previous_count[num] = malloc(sizeof(long long) *
+ cpuidle_sysfs_monitor.hw_states_num);
+ current_count[num] = malloc(sizeof(long long) *
+ cpuidle_sysfs_monitor.hw_states_num);
+ }
+
+ cpuidle_sysfs_monitor.name_len = strlen(cpuidle_sysfs_monitor.name);
+ return &cpuidle_sysfs_monitor;
+}
+
+void cpuidle_unregister(void)
+{
+ int num;
+
+ for (num = 0; num < cpu_count; num++) {
+ free(previous_count[num]);
+ free(current_count[num]);
+ }
+ free(previous_count);
+ free(current_count);
+}
+
+struct cpuidle_monitor cpuidle_sysfs_monitor = {
+ .name = "Idle_Stats",
+ .hw_states = cpuidle_cstates,
+ .start = cpuidle_start,
+ .stop = cpuidle_stop,
+ .do_register = cpuidle_register,
+ .unregister = cpuidle_unregister,
+ .needs_root = 0,
+ .overflow_s = UINT_MAX,
+};
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
new file mode 100644
index 00000000000..ba4bf068380
--- /dev/null
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -0,0 +1,448 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Output format inspired by Len Brown's <lenb@kernel.org> turbostat tool.
+ *
+ */
+
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <libgen.h>
+
+#include "idle_monitor/cpupower-monitor.h"
+#include "idle_monitor/idle_monitors.h"
+#include "helpers/helpers.h"
+
+/* Define pointers to all monitors. */
+#define DEF(x) & x ## _monitor ,
+struct cpuidle_monitor *all_monitors[] = {
+#include "idle_monitors.def"
+0
+};
+
+static struct cpuidle_monitor *monitors[MONITORS_MAX];
+static unsigned int avail_monitors;
+
+static char *progname;
+
+enum operation_mode_e { list = 1, show, show_all };
+static int mode;
+static int interval = 1;
+static char *show_monitors_param;
+static struct cpupower_topology cpu_top;
+
+/* ToDo: Document this in the manpage */
+static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', };
+
+long long timespec_diff_us(struct timespec start, struct timespec end)
+{
+ struct timespec temp;
+ if ((end.tv_nsec - start.tv_nsec) < 0) {
+ temp.tv_sec = end.tv_sec - start.tv_sec - 1;
+ temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
+ } else {
+ temp.tv_sec = end.tv_sec - start.tv_sec;
+ temp.tv_nsec = end.tv_nsec - start.tv_nsec;
+ }
+ return (temp.tv_sec * 1000000) + (temp.tv_nsec / 1000);
+}
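+
+/*
+ * Worked example (editorial note, not part of the original patch):
+ * start = {10, 900000000}, end = {12, 100000000} -> the nsec difference
+ * is negative, so temp becomes {1, 200000000} and the function returns
+ * 1 * 1000000 + 200000000 / 1000 = 1200000 us (1.2 s).
+ */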
+
+void monitor_help(void)
+{
+ printf(_("cpupower monitor: [-m <mon1>,[<mon2>],.. ] command\n"));
+ printf(_("cpupower monitor: [-m <mon1>,[<mon2>],.. ] [ -i interval_sec ]\n"));
+ printf(_("cpupower monitor: -l\n"));
+ printf(_("\t command: pass an arbitrary command to measure specific workload\n"));
+	printf(_("\t -i: time interval to measure for in seconds (default 1)\n"));
+ printf(_("\t -l: list available CPU sleep monitors (for use with -m)\n"));
+ printf(_("\t -m: show specific CPU sleep monitors only (in same order)\n"));
+ printf(_("\t -h: print this help\n"));
+ printf("\n");
+	printf(_("Only one of -l, -m is allowed.\nIf neither is passed,"));
+ printf(_(" all supported monitors are shown\n"));
+}
+
+void print_n_spaces(int n)
+{
+ int x;
+ for (x = 0; x < n; x++)
+ printf(" ");
+}
+
+/* size of s must be at least n + 1 */
+int fill_string_with_spaces(char *s, int n)
+{
+ int len = strlen(s);
+ if (len > n)
+ return -1;
+ for (; len < n; len++)
+ s[len] = ' ';
+ s[len] = '\0';
+ return 0;
+}
+
+void print_header(int topology_depth)
+{
+ int unsigned mon;
+ int state, need_len, pr_mon_len;
+ cstate_t s;
+ char buf[128] = "";
+ int percent_width = 4;
+
+ fill_string_with_spaces(buf, topology_depth * 5 - 1);
+ printf("%s|", buf);
+
+ for (mon = 0; mon < avail_monitors; mon++) {
+ pr_mon_len = 0;
+ need_len = monitors[mon]->hw_states_num * (percent_width + 3)
+ - 1;
+ if (mon != 0) {
+ printf("|| ");
+ need_len--;
+ }
+ sprintf(buf, "%s", monitors[mon]->name);
+ fill_string_with_spaces(buf, need_len);
+ printf("%s", buf);
+ }
+ printf("\n");
+
+ if (topology_depth > 2)
+ printf("PKG |");
+ if (topology_depth > 1)
+ printf("CORE|");
+ if (topology_depth > 0)
+ printf("CPU |");
+
+ for (mon = 0; mon < avail_monitors; mon++) {
+ if (mon != 0)
+ printf("|| ");
+ else
+ printf(" ");
+ for (state = 0; state < monitors[mon]->hw_states_num; state++) {
+ if (state != 0)
+ printf(" | ");
+ s = monitors[mon]->hw_states[state];
+ sprintf(buf, "%s", s.name);
+ fill_string_with_spaces(buf, percent_width);
+ printf("%s", buf);
+ }
+ printf(" ");
+ }
+ printf("\n");
+}
+
+
+void print_results(int topology_depth, int cpu)
+{
+ unsigned int mon;
+ int state, ret;
+ double percent;
+ unsigned long long result;
+ cstate_t s;
+
+ if (topology_depth > 2)
+ printf("%4d|", cpu_top.core_info[cpu].pkg);
+ if (topology_depth > 1)
+ printf("%4d|", cpu_top.core_info[cpu].core);
+ if (topology_depth > 0)
+ printf("%4d|", cpu_top.core_info[cpu].cpu);
+
+ for (mon = 0; mon < avail_monitors; mon++) {
+ if (mon != 0)
+ printf("||");
+
+ for (state = 0; state < monitors[mon]->hw_states_num; state++) {
+ if (state != 0)
+ printf("|");
+
+ s = monitors[mon]->hw_states[state];
+
+ if (s.get_count_percent) {
+ ret = s.get_count_percent(s.id, &percent,
+ cpu_top.core_info[cpu].cpu);
+ if (ret)
+ printf("******");
+ else if (percent >= 100.0)
+ printf("%6.1f", percent);
+ else
+ printf("%6.2f", percent);
+ } else if (s.get_count) {
+ ret = s.get_count(s.id, &result,
+ cpu_top.core_info[cpu].cpu);
+ if (ret)
+ printf("******");
+ else
+ printf("%6llu", result);
+ } else {
+ printf(_("Monitor %s, Counter %s has no count "
+ "function. Implementation error\n"),
+ monitors[mon]->name, s.name);
+ exit(EXIT_FAILURE);
+ }
+ }
+ }
+ /* cpu offline */
+ if (cpu_top.core_info[cpu].pkg == -1 ||
+ cpu_top.core_info[cpu].core == -1) {
+ printf(_(" *is offline\n"));
+ return;
+ } else
+ printf("\n");
+}
+
+
+/* param: string passed via the -m parameter (the list of monitors to show)
+ *
+ * Monitors must already have been registered; matching monitors are
+ * picked out and the available monitors array is overridden with the
+ * matching ones.
+ *
+ * Monitors are sorted in the same order the user passes them.
+ */
+
+static void parse_monitor_param(char *param)
+{
+ unsigned int num;
+ int mon, hits = 0;
+ char *tmp = param, *token;
+ struct cpuidle_monitor *tmp_mons[MONITORS_MAX];
+
+
+ for (mon = 0; mon < MONITORS_MAX; mon++, tmp = NULL) {
+ token = strtok(tmp, ",");
+ if (token == NULL)
+ break;
+ if (strlen(token) >= MONITOR_NAME_LEN) {
+ printf(_("%s: max monitor name length"
+ " (%d) exceeded\n"), token, MONITOR_NAME_LEN);
+ continue;
+ }
+
+ for (num = 0; num < avail_monitors; num++) {
+ if (!strcmp(monitors[num]->name, token)) {
+ dprint("Found requested monitor: %s\n", token);
+ tmp_mons[hits] = monitors[num];
+ hits++;
+ }
+ }
+ }
+ if (hits == 0) {
+ printf(_("No matching monitor found in %s, "
+ "try -l option\n"), param);
+ monitor_help();
+ exit(EXIT_FAILURE);
+ }
+	/* Override detected/registered monitors array with the requested ones */
+ memcpy(monitors, tmp_mons,
+ sizeof(struct cpuidle_monitor *) * MONITORS_MAX);
+ avail_monitors = hits;
+}
+
+void list_monitors(void)
+{
+ unsigned int mon;
+ int state;
+ cstate_t s;
+
+ for (mon = 0; mon < avail_monitors; mon++) {
+ printf(_("Monitor \"%s\" (%d states) - Might overflow after %u "
+ "s\n"),
+ monitors[mon]->name, monitors[mon]->hw_states_num,
+ monitors[mon]->overflow_s);
+
+ for (state = 0; state < monitors[mon]->hw_states_num; state++) {
+ s = monitors[mon]->hw_states[state];
+ /*
+ * ToDo show more state capabilities:
+			 * percent, time (granularity)
+ */
+ printf("%s\t[%c] -> %s\n", s.name, range_abbr[s.range],
+ gettext(s.desc));
+ }
+ }
+}
+
+int fork_it(char **argv)
+{
+ int status;
+ unsigned int num;
+ unsigned long long timediff;
+ pid_t child_pid;
+ struct timespec start, end;
+
+ child_pid = fork();
+ clock_gettime(CLOCK_REALTIME, &start);
+
+ for (num = 0; num < avail_monitors; num++)
+ monitors[num]->start();
+
+ if (!child_pid) {
+ /* child */
+		execvp(argv[0], argv);
+		/* Only reached if execvp() fails */
+		perror("exec");
+		exit(EXIT_FAILURE);
+ } else {
+ /* parent */
+ if (child_pid == -1) {
+ perror("fork");
+ exit(1);
+ }
+
+ signal(SIGINT, SIG_IGN);
+ signal(SIGQUIT, SIG_IGN);
+ if (waitpid(child_pid, &status, 0) == -1) {
+ perror("wait");
+ exit(1);
+ }
+ }
+ clock_gettime(CLOCK_REALTIME, &end);
+ for (num = 0; num < avail_monitors; num++)
+ monitors[num]->stop();
+
+ timediff = timespec_diff_us(start, end);
+ if (WIFEXITED(status))
+ printf(_("%s took %.5f seconds and exited with status %d\n"),
+ argv[0], timediff / (1000.0 * 1000),
+ WEXITSTATUS(status));
+ return 0;
+}
+
+int do_interval_measure(int i)
+{
+ unsigned int num;
+
+ for (num = 0; num < avail_monitors; num++) {
+ dprint("HW C-state residency monitor: %s - States: %d\n",
+ monitors[num]->name, monitors[num]->hw_states_num);
+ monitors[num]->start();
+ }
+ sleep(i);
+ for (num = 0; num < avail_monitors; num++)
+ monitors[num]->stop();
+
+ return 0;
+}
+
+static void cmdline(int argc, char *argv[])
+{
+ int opt;
+ progname = basename(argv[0]);
+
+ while ((opt = getopt(argc, argv, "+hli:m:")) != -1) {
+ switch (opt) {
+ case 'h':
+ monitor_help();
+ exit(EXIT_SUCCESS);
+ case 'l':
+ if (mode) {
+ monitor_help();
+ exit(EXIT_FAILURE);
+ }
+ mode = list;
+ break;
+ case 'i':
+ /* only allow -i with -m or no option */
+ if (mode && mode != show) {
+ monitor_help();
+ exit(EXIT_FAILURE);
+ }
+ interval = atoi(optarg);
+ break;
+ case 'm':
+ if (mode) {
+ monitor_help();
+ exit(EXIT_FAILURE);
+ }
+ mode = show;
+ show_monitors_param = optarg;
+ break;
+ default:
+ monitor_help();
+ exit(EXIT_FAILURE);
+ }
+ }
+ if (!mode)
+ mode = show_all;
+}
+
+int cmd_monitor(int argc, char **argv)
+{
+ unsigned int num;
+ struct cpuidle_monitor *test_mon;
+ int cpu;
+
+ cmdline(argc, argv);
+ cpu_count = get_cpu_topology(&cpu_top);
+ if (cpu_count < 0) {
+ printf(_("Cannot read number of available processors\n"));
+ return EXIT_FAILURE;
+ }
+
+ dprint("System has up to %d CPU cores\n", cpu_count);
+
+ for (num = 0; all_monitors[num]; num++) {
+ dprint("Try to register: %s\n", all_monitors[num]->name);
+ test_mon = all_monitors[num]->do_register();
+ if (test_mon) {
+ if (test_mon->needs_root && !run_as_root) {
+ fprintf(stderr, _("Available monitor %s needs "
+ "root access\n"), test_mon->name);
+ continue;
+ }
+ monitors[avail_monitors] = test_mon;
+ dprint("%s registered\n", all_monitors[num]->name);
+ avail_monitors++;
+ }
+ }
+
+ if (avail_monitors == 0) {
+ printf(_("No HW Cstate monitors found\n"));
+ return 1;
+ }
+
+ if (mode == list) {
+ list_monitors();
+ exit(EXIT_SUCCESS);
+ }
+
+ if (mode == show)
+ parse_monitor_param(show_monitors_param);
+
+ dprint("Packages: %d - Cores: %d - CPUs: %d\n",
+ cpu_top.pkgs, cpu_top.cores, cpu_count);
+
+ /*
+ * if any parameters are left, they must be a command to fork
+ */
+ if (argc - optind)
+ fork_it(argv + optind);
+ else
+ do_interval_measure(interval);
+
+ /*
+ * ToDo: topology parsing needs fixing first to do
+ * this more generically
+ */
+ if (cpu_top.pkgs > 1)
+ print_header(3);
+ else
+ print_header(1);
+
+ for (cpu = 0; cpu < cpu_count; cpu++) {
+ if (cpu_top.pkgs > 1)
+ print_results(3, cpu);
+ else
+ print_results(1, cpu);
+ }
+
+ for (num = 0; num < avail_monitors; num++)
+ monitors[num]->unregister();
+
+ cpu_topology_release(cpu_top);
+ return 0;
+}
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
new file mode 100644
index 00000000000..9312ee1f2db
--- /dev/null
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
@@ -0,0 +1,68 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ */
+
+#ifndef __CPUIDLE_INFO_HW__
+#define __CPUIDLE_INFO_HW__
+
+#include <stdarg.h>
+#include <time.h>
+
+#include "idle_monitor/idle_monitors.h"
+
+#define MONITORS_MAX 20
+#define MONITOR_NAME_LEN 20
+#define CSTATE_NAME_LEN 5
+#define CSTATE_DESC_LEN 60
+
+int cpu_count;
+
+/* Hard to define the right names; these describe the topology levels: */
+enum power_range_e {
+ RANGE_THREAD, /* Lowest in topology hierarchy, AMD: core, Intel: thread,
+ kernel sysfs: cpu */
+ RANGE_CORE, /* AMD: unit, Intel: core, kernel_sysfs: core_id */
+ RANGE_PACKAGE, /* Package, processor socket */
+ RANGE_MACHINE, /* Machine, platform wide */
+ RANGE_MAX };
+
+typedef struct cstate {
+ int id;
+ enum power_range_e range;
+ char name[CSTATE_NAME_LEN];
+ char desc[CSTATE_DESC_LEN];
+
+ /* either provide a percentage or a general count */
+ int (*get_count_percent)(unsigned int self_id, double *percent,
+ unsigned int cpu);
+ int (*get_count)(unsigned int self_id, unsigned long long *count,
+ unsigned int cpu);
+} cstate_t;
+
+struct cpuidle_monitor {
+ /* Name must not contain whitespace */
+ char name[MONITOR_NAME_LEN];
+ int name_len;
+ int hw_states_num;
+ cstate_t *hw_states;
+ int (*start) (void);
+ int (*stop) (void);
+ struct cpuidle_monitor* (*do_register) (void);
+ void (*unregister)(void);
+ unsigned int overflow_s;
+ int needs_root;
+};
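+
+/*
+ * Illustrative sketch (hypothetical names, not part of this patch): a monitor
+ * implementation is expected to provide a static instance along the lines of
+ *
+ *	struct cpuidle_monitor example_monitor = {
+ *		.name		= "Example",
+ *		.hw_states	= example_cstates,
+ *		.hw_states_num	= EXAMPLE_CSTATE_COUNT,
+ *		.start		= example_start,
+ *		.stop		= example_stop,
+ *		.do_register	= example_register,
+ *		.unregister	= example_unregister,
+ *		.needs_root	= 1,
+ *		.overflow_s	= UINT_MAX,
+ *	};
+ *
+ * and return its address from do_register(), or NULL when the hardware is
+ * not supported; see the mperf monitor added by this patch for a real
+ * implementation.
+ */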
+
+extern long long timespec_diff_us(struct timespec start, struct timespec end);
+
+#define print_overflow_err(mes, ov) \
+{ \
+ fprintf(stderr, gettext("Measure took %u seconds, but registers could " \
+ "overflow at %u seconds, results " \
+ "could be inaccurate\n"), mes, ov); \
+}
+
+#endif /* __CPUIDLE_INFO_HW__ */
diff --git a/tools/power/cpupower/utils/idle_monitor/idle_monitors.def b/tools/power/cpupower/utils/idle_monitor/idle_monitors.def
new file mode 100644
index 00000000000..e3f8d9b2b18
--- /dev/null
+++ b/tools/power/cpupower/utils/idle_monitor/idle_monitors.def
@@ -0,0 +1,7 @@
+#if defined(__i386__) || defined(__x86_64__)
+DEF(amd_fam14h)
+DEF(intel_nhm)
+DEF(intel_snb)
+DEF(mperf)
+#endif
+DEF(cpuidle_sysfs)
diff --git a/tools/power/cpupower/utils/idle_monitor/idle_monitors.h b/tools/power/cpupower/utils/idle_monitor/idle_monitors.h
new file mode 100644
index 00000000000..4fcdeb1e07e
--- /dev/null
+++ b/tools/power/cpupower/utils/idle_monitor/idle_monitors.h
@@ -0,0 +1,18 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Based on the idea from Michael Matz <matz@suse.de>
+ *
+ */
+
+#ifndef _CPUIDLE_IDLE_MONITORS_H_
+#define _CPUIDLE_IDLE_MONITORS_H_
+
+#define DEF(x) extern struct cpuidle_monitor x ##_monitor;
+#include "idle_monitors.def"
+#undef DEF
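+/*
+ * Illustrative note: with the DEF() definition above, including
+ * idle_monitors.def expands each entry, e.g. DEF(mperf) becomes
+ *	extern struct cpuidle_monitor mperf_monitor;
+ * so every monitor listed in the .def file is declared here; the
+ * all_monitors[] array declared below is expected to collect the same set.
+ */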
+extern struct cpuidle_monitor *all_monitors[];
+
+#endif /* _CPUIDLE_IDLE_MONITORS_H_ */
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
new file mode 100644
index 00000000000..63ca87a05e5
--- /dev/null
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -0,0 +1,255 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#include <cpufreq.h>
+
+#include "helpers/helpers.h"
+#include "idle_monitor/cpupower-monitor.h"
+
+#define MSR_APERF 0xE8
+#define MSR_MPERF 0xE7
+
+#define MSR_TSC 0x10
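+/* 0xE7/0xE8 are the architectural IA32_MPERF/IA32_APERF MSRs; 0x10 is the TSC */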
+
+enum mperf_id { C0 = 0, Cx, AVG_FREQ, MPERF_CSTATE_COUNT };
+
+static int mperf_get_count_percent(unsigned int self_id, double *percent,
+ unsigned int cpu);
+static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
+ unsigned int cpu);
+
+static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = {
+ {
+ .name = "C0",
+ .desc = N_("Processor Core not idle"),
+ .id = C0,
+ .range = RANGE_THREAD,
+ .get_count_percent = mperf_get_count_percent,
+ },
+ {
+ .name = "Cx",
+ .desc = N_("Processor Core in an idle state"),
+ .id = Cx,
+ .range = RANGE_THREAD,
+ .get_count_percent = mperf_get_count_percent,
+ },
+
+ {
+ .name = "Freq",
+ .desc = N_("Average Frequency (including boost) in MHz"),
+ .id = AVG_FREQ,
+ .range = RANGE_THREAD,
+ .get_count = mperf_get_count_freq,
+ },
+};
+
+static unsigned long long tsc_at_measure_start;
+static unsigned long long tsc_at_measure_end;
+static unsigned long max_frequency;
+static unsigned long long *mperf_previous_count;
+static unsigned long long *aperf_previous_count;
+static unsigned long long *mperf_current_count;
+static unsigned long long *aperf_current_count;
+/* valid flag for all CPUs. If an MSR read fails, it will be zero */
+static int *is_valid;
+
+static int mperf_get_tsc(unsigned long long *tsc)
+{
+ return read_msr(0, MSR_TSC, tsc);
+}
+
+static int mperf_init_stats(unsigned int cpu)
+{
+ unsigned long long val;
+ int ret;
+
+ ret = read_msr(cpu, MSR_APERF, &val);
+ aperf_previous_count[cpu] = val;
+ ret |= read_msr(cpu, MSR_MPERF, &val);
+ mperf_previous_count[cpu] = val;
+ is_valid[cpu] = !ret;
+
+ return 0;
+}
+
+static int mperf_measure_stats(unsigned int cpu)
+{
+ unsigned long long val;
+ int ret;
+
+ ret = read_msr(cpu, MSR_APERF, &val);
+ aperf_current_count[cpu] = val;
+ ret |= read_msr(cpu, MSR_MPERF, &val);
+ mperf_current_count[cpu] = val;
+ is_valid[cpu] = !ret;
+
+ return 0;
+}
+
+/*
+ * get_average_perf()
+ *
+ * Returns the average performance (also considers boosted frequencies)
+ *
+ * Input:
+ * aperf_diff: Difference of the aperf register over a time period
+ * mperf_diff: Difference of the mperf register over the same time period
+ * The maximum (P0) frequency is taken from the global max_frequency.
+ *
+ * Returns:
+ * Average performance over the time period
+ */
+static unsigned long get_average_perf(unsigned long long aperf_diff,
+ unsigned long long mperf_diff)
+{
+ unsigned int perf_percent = 0;
+ if (((unsigned long)(-1) / 100) < aperf_diff) {
+ int shift_count = 7;
+ aperf_diff >>= shift_count;
+ mperf_diff >>= shift_count;
+ }
+ perf_percent = (aperf_diff * 100) / mperf_diff;
+ return (max_frequency * perf_percent) / 100;
+}
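+
+/*
+ * Worked example (hypothetical numbers): with max_frequency = 3000000 kHz
+ * (a 3 GHz P0), aperf_diff = 150000 and mperf_diff = 100000, perf_percent
+ * is 150, so get_average_perf() returns 3000000 * 150 / 100 = 4500000 kHz,
+ * i.e. the core ran above P0 on average because of boosting.
+ */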
+
+static int mperf_get_count_percent(unsigned int id, double *percent,
+ unsigned int cpu)
+{
+ unsigned long long aperf_diff, mperf_diff, tsc_diff;
+
+ if (!is_valid[cpu])
+ return -1;
+
+ if (id != C0 && id != Cx)
+ return -1;
+
+ mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
+ aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
+ tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
+
+ *percent = 100.0 * mperf_diff / tsc_diff;
+ dprint("%s: mperf_diff: %llu, tsc_diff: %llu\n",
+ mperf_cstates[id].name, mperf_diff, tsc_diff);
+
+ if (id == Cx)
+ *percent = 100.0 - *percent;
+
+ dprint("%s: previous: %llu - current: %llu - (%u)\n",
+ mperf_cstates[id].name, mperf_diff, aperf_diff, cpu);
+ dprint("%s: %f\n", mperf_cstates[id].name, *percent);
+ return 0;
+}
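+
+/*
+ * Worked example (hypothetical numbers): if the TSC advanced by 1000000
+ * cycles between start and stop while MPERF advanced by 250000, C0 is
+ * reported as 25.0 percent and Cx (time spent in any idle state) as the
+ * remaining 75.0 percent.
+ */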
+
+static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
+ unsigned int cpu)
+{
+ unsigned long long aperf_diff, mperf_diff;
+
+ if (id != AVG_FREQ)
+ return 1;
+
+ if (!is_valid[cpu])
+ return -1;
+
+ mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
+ aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
+
+ /* Return MHz for now; might want to return kHz once the column
+ width handling is more generic */
+ *count = get_average_perf(aperf_diff, mperf_diff) / 1000;
+ dprint("%s: %llu\n", mperf_cstates[id].name, *count);
+
+ return 0;
+}
+
+static int mperf_start(void)
+{
+ int cpu;
+ unsigned long long dbg;
+
+ mperf_get_tsc(&tsc_at_measure_start);
+
+ for (cpu = 0; cpu < cpu_count; cpu++)
+ mperf_init_stats(cpu);
+
+ mperf_get_tsc(&dbg);
+ dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
+ return 0;
+}
+
+static int mperf_stop(void)
+{
+ unsigned long long dbg;
+ int cpu;
+
+ mperf_get_tsc(&tsc_at_measure_end);
+
+ for (cpu = 0; cpu < cpu_count; cpu++)
+ mperf_measure_stats(cpu);
+
+ mperf_get_tsc(&dbg);
+ dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
+
+ return 0;
+}
+
+struct cpuidle_monitor mperf_monitor;
+
+struct cpuidle_monitor *mperf_register(void)
+{
+ unsigned long min;
+
+ if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF))
+ return NULL;
+
+ /* Assume min/max are the same on all cores */
+ if (cpufreq_get_hardware_limits(0, &min, &max_frequency)) {
+ dprint("Cannot retrieve max freq from cpufreq kernel "
+ "subsystem\n");
+ return NULL;
+ }
+
+ /* Free this at program termination */
+ is_valid = calloc(cpu_count, sizeof(int));
+ mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
+ aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
+ mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
+ aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
+
+ mperf_monitor.name_len = strlen(mperf_monitor.name);
+ return &mperf_monitor;
+}
+
+void mperf_unregister(void)
+{
+ free(mperf_previous_count);
+ free(aperf_previous_count);
+ free(mperf_current_count);
+ free(aperf_current_count);
+ free(is_valid);
+}
+
+struct cpuidle_monitor mperf_monitor = {
+ .name = "Mperf",
+ .hw_states_num = MPERF_CSTATE_COUNT,
+ .hw_states = mperf_cstates,
+ .start = mperf_start,
+ .stop = mperf_stop,
+ .do_register = mperf_register,
+ .unregister = mperf_unregister,
+ .needs_root = 1,
+ .overflow_s = 922000000 /* 922337203 seconds TSC overflow
+ at 20GHz */
+};
+#endif /* #if defined(__i386__) || defined(__x86_64__) */
diff --git a/tools/power/cpupower/utils/idle_monitor/nhm_idle.c b/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
new file mode 100644
index 00000000000..d2a91dd0d56
--- /dev/null
+++ b/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
@@ -0,0 +1,216 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Based on Len Brown's <lenb@kernel.org> turbostat tool.
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "helpers/helpers.h"
+#include "idle_monitor/cpupower-monitor.h"
+
+#define MSR_PKG_C3_RESIDENCY 0x3F8
+#define MSR_PKG_C6_RESIDENCY 0x3F9
+#define MSR_CORE_C3_RESIDENCY 0x3FC
+#define MSR_CORE_C6_RESIDENCY 0x3FD
+
+#define MSR_TSC 0x10
+
+#define NHM_CSTATE_COUNT 4
+
+enum intel_nhm_id { C3 = 0, C6, PC3, PC6, TSC = 0xFFFF };
+
+static int nhm_get_count_percent(unsigned int self_id, double *percent,
+ unsigned int cpu);
+
+static cstate_t nhm_cstates[NHM_CSTATE_COUNT] = {
+ {
+ .name = "C3",
+ .desc = N_("Processor Core C3"),
+ .id = C3,
+ .range = RANGE_CORE,
+ .get_count_percent = nhm_get_count_percent,
+ },
+ {
+ .name = "C6",
+ .desc = N_("Processor Core C6"),
+ .id = C6,
+ .range = RANGE_CORE,
+ .get_count_percent = nhm_get_count_percent,
+ },
+
+ {
+ .name = "PC3",
+ .desc = N_("Processor Package C3"),
+ .id = PC3,
+ .range = RANGE_PACKAGE,
+ .get_count_percent = nhm_get_count_percent,
+ },
+ {
+ .name = "PC6",
+ .desc = N_("Processor Package C6"),
+ .id = PC6,
+ .range = RANGE_PACKAGE,
+ .get_count_percent = nhm_get_count_percent,
+ },
+};
+
+static unsigned long long tsc_at_measure_start;
+static unsigned long long tsc_at_measure_end;
+static unsigned long long *previous_count[NHM_CSTATE_COUNT];
+static unsigned long long *current_count[NHM_CSTATE_COUNT];
+/* valid flag for all CPUs. If an MSR read fails, it will be zero */
+static int *is_valid;
+
+static int nhm_get_count(enum intel_nhm_id id, unsigned long long *val,
+ unsigned int cpu)
+{
+ int msr;
+
+ switch (id) {
+ case C3:
+ msr = MSR_CORE_C3_RESIDENCY;
+ break;
+ case C6:
+ msr = MSR_CORE_C6_RESIDENCY;
+ break;
+ case PC3:
+ msr = MSR_PKG_C3_RESIDENCY;
+ break;
+ case PC6:
+ msr = MSR_PKG_C6_RESIDENCY;
+ break;
+ case TSC:
+ msr = MSR_TSC;
+ break;
+ default:
+ return -1;
+ }
+ if (read_msr(cpu, msr, val))
+ return -1;
+
+ return 0;
+}
+
+static int nhm_get_count_percent(unsigned int id, double *percent,
+ unsigned int cpu)
+{
+ *percent = 0.0;
+
+ if (!is_valid[cpu])
+ return -1;
+
+ *percent = (100.0 *
+ (current_count[id][cpu] - previous_count[id][cpu])) /
+ (tsc_at_measure_end - tsc_at_measure_start);
+
+ dprint("%s: previous: %llu - current: %llu - (%u)\n",
+ nhm_cstates[id].name, previous_count[id][cpu],
+ current_count[id][cpu], cpu);
+
+ dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n",
+ nhm_cstates[id].name,
+ (unsigned long long) tsc_at_measure_end - tsc_at_measure_start,
+ current_count[id][cpu] - previous_count[id][cpu],
+ *percent, cpu);
+
+ return 0;
+}
+
+static int nhm_start(void)
+{
+ int num, cpu;
+ unsigned long long dbg, val;
+
+ nhm_get_count(TSC, &tsc_at_measure_start, 0);
+
+ for (num = 0; num < NHM_CSTATE_COUNT; num++) {
+ for (cpu = 0; cpu < cpu_count; cpu++) {
+ is_valid[cpu] = !nhm_get_count(num, &val, cpu);
+ previous_count[num][cpu] = val;
+ }
+ }
+ nhm_get_count(TSC, &dbg, 0);
+ dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
+ return 0;
+}
+
+static int nhm_stop(void)
+{
+ unsigned long long val;
+ unsigned long long dbg;
+ int num, cpu;
+
+ nhm_get_count(TSC, &tsc_at_measure_end, 0);
+
+ for (num = 0; num < NHM_CSTATE_COUNT; num++) {
+ for (cpu = 0; cpu < cpu_count; cpu++) {
+ is_valid[cpu] = !nhm_get_count(num, &val, cpu);
+ current_count[num][cpu] = val;
+ }
+ }
+ nhm_get_count(TSC, &dbg, 0);
+ dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
+
+ return 0;
+}
+
+struct cpuidle_monitor intel_nhm_monitor;
+
+struct cpuidle_monitor *intel_nhm_register(void)
+{
+ int num;
+
+ if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
+ return NULL;
+
+ if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
+ return NULL;
+
+ if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF))
+ return NULL;
+
+ /* Free this at program termination */
+ is_valid = calloc(cpu_count, sizeof(int));
+ for (num = 0; num < NHM_CSTATE_COUNT; num++) {
+ previous_count[num] = calloc(cpu_count,
+ sizeof(unsigned long long));
+ current_count[num] = calloc(cpu_count,
+ sizeof(unsigned long long));
+ }
+
+ intel_nhm_monitor.name_len = strlen(intel_nhm_monitor.name);
+ return &intel_nhm_monitor;
+}
+
+void intel_nhm_unregister(void)
+{
+ int num;
+
+ for (num = 0; num < NHM_CSTATE_COUNT; num++) {
+ free(previous_count[num]);
+ free(current_count[num]);
+ }
+ free(is_valid);
+}
+
+struct cpuidle_monitor intel_nhm_monitor = {
+ .name = "Nehalem",
+ .hw_states_num = NHM_CSTATE_COUNT,
+ .hw_states = nhm_cstates,
+ .start = nhm_start,
+ .stop = nhm_stop,
+ .do_register = intel_nhm_register,
+ .unregister = intel_nhm_unregister,
+ .needs_root = 1,
+ .overflow_s = 922000000 /* 922337203 seconds TSC overflow
+ at 20GHz */
+};
+#endif
diff --git a/tools/power/cpupower/utils/idle_monitor/snb_idle.c b/tools/power/cpupower/utils/idle_monitor/snb_idle.c
new file mode 100644
index 00000000000..a1bc07cd53e
--- /dev/null
+++ b/tools/power/cpupower/utils/idle_monitor/snb_idle.c
@@ -0,0 +1,190 @@
+/*
+ * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Based on Len Brown's <lenb@kernel.org> turbostat tool.
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "helpers/helpers.h"
+#include "idle_monitor/cpupower-monitor.h"
+
+#define MSR_PKG_C2_RESIDENCY 0x60D
+#define MSR_PKG_C7_RESIDENCY 0x3FA
+#define MSR_CORE_C7_RESIDENCY 0x3FE
+
+#define MSR_TSC 0x10
+
+enum intel_snb_id { C7 = 0, PC2, PC7, SNB_CSTATE_COUNT, TSC = 0xFFFF };
+
+static int snb_get_count_percent(unsigned int self_id, double *percent,
+ unsigned int cpu);
+
+static cstate_t snb_cstates[SNB_CSTATE_COUNT] = {
+ {
+ .name = "C7",
+ .desc = N_("Processor Core C7"),
+ .id = C7,
+ .range = RANGE_CORE,
+ .get_count_percent = snb_get_count_percent,
+ },
+ {
+ .name = "PC2",
+ .desc = N_("Processor Package C2"),
+ .id = PC2,
+ .range = RANGE_PACKAGE,
+ .get_count_percent = snb_get_count_percent,
+ },
+ {
+ .name = "PC7",
+ .desc = N_("Processor Package C7"),
+ .id = PC7,
+ .range = RANGE_PACKAGE,
+ .get_count_percent = snb_get_count_percent,
+ },
+};
+
+static unsigned long long tsc_at_measure_start;
+static unsigned long long tsc_at_measure_end;
+static unsigned long long *previous_count[SNB_CSTATE_COUNT];
+static unsigned long long *current_count[SNB_CSTATE_COUNT];
+/* valid flag for all CPUs. If an MSR read fails, it will be zero */
+static int *is_valid;
+
+static int snb_get_count(enum intel_snb_id id, unsigned long long *val,
+ unsigned int cpu)
+{
+ int msr;
+
+ switch (id) {
+ case C7:
+ msr = MSR_CORE_C7_RESIDENCY;
+ break;
+ case PC2:
+ msr = MSR_PKG_C2_RESIDENCY;
+ break;
+ case PC7:
+ msr = MSR_PKG_C7_RESIDENCY;
+ break;
+ case TSC:
+ msr = MSR_TSC;
+ break;
+ default:
+ return -1;
+ }
+ if (read_msr(cpu, msr, val))
+ return -1;
+ return 0;
+}
+
+static int snb_get_count_percent(unsigned int id, double *percent,
+ unsigned int cpu)
+{
+ *percent = 0.0;
+
+ if (!is_valid[cpu])
+ return -1;
+
+ *percent = (100.0 *
+ (current_count[id][cpu] - previous_count[id][cpu])) /
+ (tsc_at_measure_end - tsc_at_measure_start);
+
+ dprint("%s: previous: %llu - current: %llu - (%u)\n",
+ snb_cstates[id].name, previous_count[id][cpu],
+ current_count[id][cpu], cpu);
+
+ dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n",
+ snb_cstates[id].name,
+ (unsigned long long) tsc_at_measure_end - tsc_at_measure_start,
+ current_count[id][cpu] - previous_count[id][cpu],
+ *percent, cpu);
+
+ return 0;
+}
+
+static int snb_start(void)
+{
+ int num, cpu;
+ unsigned long long val;
+
+ for (num = 0; num < SNB_CSTATE_COUNT; num++) {
+ for (cpu = 0; cpu < cpu_count; cpu++) {
+ snb_get_count(num, &val, cpu);
+ previous_count[num][cpu] = val;
+ }
+ }
+ snb_get_count(TSC, &tsc_at_measure_start, 0);
+ return 0;
+}
+
+static int snb_stop(void)
+{
+ unsigned long long val;
+ int num, cpu;
+
+ snb_get_count(TSC, &tsc_at_measure_end, 0);
+
+ for (num = 0; num < SNB_CSTATE_COUNT; num++) {
+ for (cpu = 0; cpu < cpu_count; cpu++) {
+ is_valid[cpu] = !snb_get_count(num, &val, cpu);
+ current_count[num][cpu] = val;
+ }
+ }
+ return 0;
+}
+
+struct cpuidle_monitor intel_snb_monitor;
+
+static struct cpuidle_monitor *snb_register(void)
+{
+ int num;
+
+ if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL
+ || cpupower_cpu_info.family != 6)
+ return NULL;
+
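+ /* Model 0x2A: Sandy Bridge client parts, 0x2D: Sandy Bridge-E/EP server parts */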
+ if (cpupower_cpu_info.model != 0x2A
+ && cpupower_cpu_info.model != 0x2D)
+ return NULL;
+
+ is_valid = calloc(cpu_count, sizeof(int));
+ for (num = 0; num < SNB_CSTATE_COUNT; num++) {
+ previous_count[num] = calloc(cpu_count,
+ sizeof(unsigned long long));
+ current_count[num] = calloc(cpu_count,
+ sizeof(unsigned long long));
+ }
+ intel_snb_monitor.name_len = strlen(intel_snb_monitor.name);
+ return &intel_snb_monitor;
+}
+
+void snb_unregister(void)
+{
+ int num;
+ free(is_valid);
+ for (num = 0; num < SNB_CSTATE_COUNT; num++) {
+ free(previous_count[num]);
+ free(current_count[num]);
+ }
+}
+
+struct cpuidle_monitor intel_snb_monitor = {
+ .name = "SandyBridge",
+ .hw_states = snb_cstates,
+ .hw_states_num = SNB_CSTATE_COUNT,
+ .start = snb_start,
+ .stop = snb_stop,
+ .do_register = snb_register,
+ .unregister = snb_unregister,
+ .needs_root = 1,
+ .overflow_s = 922000000 /* 922337203 seconds TSC overflow
+ at 20GHz */
+};
+#endif /* defined(__i386__) || defined(__x86_64__) */
diff --git a/tools/power/cpupower/utils/version-gen.sh b/tools/power/cpupower/utils/version-gen.sh
new file mode 100755
index 00000000000..5ec41c55699
--- /dev/null
+++ b/tools/power/cpupower/utils/version-gen.sh
@@ -0,0 +1,35 @@
+#!/bin/sh
+#
+# Script which prints out the version to use for building cpupowerutils.
+# Must be called from tools/power/cpupower/
+#
+# Heavily based on tools/perf/util/PERF-VERSION-GEN.
+
+LF='
+'
+
+# First check if there is a .git to get the version from git describe
+# otherwise try to get the version from the kernel makefile
+if test -d ../../../.git -o -f ../../../.git &&
+ VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
+ case "$VN" in
+ *$LF*) (exit 1) ;;
+ v[0-9]*)
+ git update-index -q --refresh
+ test -z "$(git diff-index --name-only HEAD --)" ||
+ VN="$VN-dirty" ;;
+ esac
+then
+ VN=$(echo "$VN" | sed -e 's/-/./g');
+else
+ eval $(grep '^VERSION[[:space:]]*=' ../../../Makefile|tr -d ' ')
+ eval $(grep '^PATCHLEVEL[[:space:]]*=' ../../../Makefile|tr -d ' ')
+ eval $(grep '^SUBLEVEL[[:space:]]*=' ../../../Makefile|tr -d ' ')
+ eval $(grep '^EXTRAVERSION[[:space:]]*=' ../../../Makefile|tr -d ' ')
+
+ VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
+fi
+
+VN=$(expr "$VN" : v*'\(.*\)')
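+
+# Illustrative example (hypothetical tag): a "git describe" output of
+# "v3.0-rc7-68-g51414d4" becomes "3.0.rc7.68.g51414d4" after the sed
+# substitution and the leading-"v" strip above.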
+
+echo $VN
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 6d8ef4a3a9b..8b2d37b59c9 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -128,34 +128,34 @@ unsigned long long get_msr(int cpu, off_t offset)
void print_header(void)
{
if (show_pkg)
- fprintf(stderr, "pkg ");
+ fprintf(stderr, "pk");
if (show_core)
- fprintf(stderr, "core");
+ fprintf(stderr, " cr");
if (show_cpu)
fprintf(stderr, " CPU");
if (do_nhm_cstates)
- fprintf(stderr, " %%c0 ");
+ fprintf(stderr, " %%c0 ");
if (has_aperf)
- fprintf(stderr, " GHz");
+ fprintf(stderr, " GHz");
fprintf(stderr, " TSC");
if (do_nhm_cstates)
- fprintf(stderr, " %%c1 ");
+ fprintf(stderr, " %%c1");
if (do_nhm_cstates)
- fprintf(stderr, " %%c3 ");
+ fprintf(stderr, " %%c3");
if (do_nhm_cstates)
- fprintf(stderr, " %%c6 ");
+ fprintf(stderr, " %%c6");
if (do_snb_cstates)
- fprintf(stderr, " %%c7 ");
+ fprintf(stderr, " %%c7");
if (do_snb_cstates)
- fprintf(stderr, " %%pc2 ");
+ fprintf(stderr, " %%pc2");
if (do_nhm_cstates)
- fprintf(stderr, " %%pc3 ");
+ fprintf(stderr, " %%pc3");
if (do_nhm_cstates)
- fprintf(stderr, " %%pc6 ");
+ fprintf(stderr, " %%pc6");
if (do_snb_cstates)
- fprintf(stderr, " %%pc7 ");
+ fprintf(stderr, " %%pc7");
if (extra_msr_offset)
- fprintf(stderr, " MSR 0x%x ", extra_msr_offset);
+ fprintf(stderr, " MSR 0x%x ", extra_msr_offset);
putc('\n', stderr);
}
@@ -194,14 +194,14 @@ void print_cnt(struct counters *p)
/* topology columns, print blanks on 1st (average) line */
if (p == cnt_average) {
if (show_pkg)
- fprintf(stderr, " ");
+ fprintf(stderr, " ");
if (show_core)
fprintf(stderr, " ");
if (show_cpu)
fprintf(stderr, " ");
} else {
if (show_pkg)
- fprintf(stderr, "%4d", p->pkg);
+ fprintf(stderr, "%d", p->pkg);
if (show_core)
fprintf(stderr, "%4d", p->core);
if (show_cpu)
@@ -241,22 +241,22 @@ void print_cnt(struct counters *p)
if (!skip_c1)
fprintf(stderr, "%7.2f", 100.0 * p->c1/p->tsc);
else
- fprintf(stderr, " ****");
+ fprintf(stderr, " ****");
}
if (do_nhm_cstates)
- fprintf(stderr, "%7.2f", 100.0 * p->c3/p->tsc);
+ fprintf(stderr, " %6.2f", 100.0 * p->c3/p->tsc);
if (do_nhm_cstates)
- fprintf(stderr, "%7.2f", 100.0 * p->c6/p->tsc);
+ fprintf(stderr, " %6.2f", 100.0 * p->c6/p->tsc);
if (do_snb_cstates)
- fprintf(stderr, "%7.2f", 100.0 * p->c7/p->tsc);
+ fprintf(stderr, " %6.2f", 100.0 * p->c7/p->tsc);
if (do_snb_cstates)
- fprintf(stderr, "%7.2f", 100.0 * p->pc2/p->tsc);
+ fprintf(stderr, " %5.2f", 100.0 * p->pc2/p->tsc);
if (do_nhm_cstates)
- fprintf(stderr, "%7.2f", 100.0 * p->pc3/p->tsc);
+ fprintf(stderr, " %5.2f", 100.0 * p->pc3/p->tsc);
if (do_nhm_cstates)
- fprintf(stderr, "%7.2f", 100.0 * p->pc6/p->tsc);
+ fprintf(stderr, " %5.2f", 100.0 * p->pc6/p->tsc);
if (do_snb_cstates)
- fprintf(stderr, "%7.2f", 100.0 * p->pc7/p->tsc);
+ fprintf(stderr, " %5.2f", 100.0 * p->pc7/p->tsc);
if (extra_msr_offset)
fprintf(stderr, " 0x%016llx", p->extra_msr);
putc('\n', stderr);
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 2618ef2ba31..33c5c7ee148 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -137,7 +137,6 @@ void cmdline(int argc, char **argv)
void validate_cpuid(void)
{
unsigned int eax, ebx, ecx, edx, max_level;
- char brand[16];
unsigned int fms, family, model, stepping;
eax = ebx = ecx = edx = 0;
@@ -160,8 +159,8 @@ void validate_cpuid(void)
model += ((fms >> 16) & 0xf) << 4;
if (verbose > 1)
- printf("CPUID %s %d levels family:model:stepping "
- "0x%x:%x:%x (%d:%d:%d)\n", brand, max_level,
+ printf("CPUID %d levels family:model:stepping "
+ "0x%x:%x:%x (%d:%d:%d)\n", max_level,
family, model, stepping, family, model, stepping);
if (!(edx & (1 << 5))) {