author | David Barksdale <amatus@amatus.name> | 2014-08-22 16:01:49 -0500
committer | David Barksdale <amatus@amatus.name> | 2015-07-06 14:09:24 -0500
commit | 28ec807763c2ab8479439207588be69571bcc23b (patch)
tree | 5c36fde8ea5f96dc41ad11078681609bd44cc406
parent | d0335e4feea0d3f7a8af3116c5dc166239da7521 (diff)
Ported over SATA and DMA drivers and configs (mybooklive-3.16.y)
This kernel+dtb boots on my My Book Live and
is able to access the SATA hard drive.
-rw-r--r-- | arch/powerpc/boot/dts/apollo3g.dts | 419
-rw-r--r-- | arch/powerpc/configs/44x/apollo_3G_nas_defconfig | 2701
-rw-r--r-- | arch/powerpc/include/asm/apm82181-adma.h | 311
-rw-r--r-- | drivers/ata/Kconfig | 9
-rw-r--r-- | drivers/ata/Makefile | 1
-rw-r--r-- | drivers/ata/sata_dwc_pmp.c | 3041
-rw-r--r-- | drivers/dma/Kconfig | 17
-rw-r--r-- | drivers/dma/Makefile | 2
-rw-r--r-- | drivers/dma/ppc4xx/Makefile | 2
-rw-r--r-- | drivers/dma/ppc4xx/apm82181-adma.c | 2201
-rw-r--r-- | drivers/dma/ppc4xx/ppc460ex_4chan_dma.c | 1110
-rw-r--r-- | drivers/dma/ppc4xx/ppc460ex_4chan_dma.h | 531
12 files changed, 10344 insertions, 1 deletion
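
The device tree below describes both SATA ports as nodes with compatible = "amcc,sata-460ex" plus a per-port dma-channel property, and the ported drivers/ata/sata_dwc_pmp.c listed above is the driver expected to bind to those nodes. For orientation only, the sketch below shows the usual way a Linux platform driver is matched to such nodes through an of_device_id table; the driver name and the empty probe/remove bodies are placeholders, not the actual contents of sata_dwc_pmp.c.

/*
 * Illustrative sketch only -- not the code from sata_dwc_pmp.c.
 * It shows how a platform driver is typically matched against the
 * "amcc,sata-460ex" nodes (SATA0/SATA1) declared in apollo3g.dts
 * below; probe/remove are placeholders.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int sata_dwc_probe(struct platform_device *ofdev)
{
	/* A real driver would ioremap the SATA and AHBDMA ranges from
	 * the node's "reg" property, read "dma-channel", and request
	 * the interrupts listed in "interrupts". */
	return 0;
}

static int sata_dwc_remove(struct platform_device *ofdev)
{
	return 0;
}

static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex" },
	{ }
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);

static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = "sata-dwc",
		.owner = THIS_MODULE,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove = sata_dwc_remove,
};
module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
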
diff --git a/arch/powerpc/boot/dts/apollo3g.dts b/arch/powerpc/boot/dts/apollo3g.dts
new file mode 100644
index 00000000000..c2e2af9d19e
--- /dev/null
+++ b/arch/powerpc/boot/dts/apollo3g.dts
@@ -0,0 +1,419 @@
+/*
+ * Device Tree for Bluestone (APM821xx) board.
+ *
+ * Copyright (c) 2010, Applied Micro Circuits Corporation
+ * Author: Tirumala R Marri <tmarri@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+/dts-v1/;
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	model = "apm,bluestone";
+	compatible = "apm,bluestone";
+	dcr-parent = <&{/cpus/cpu@0}>;
+
+	aliases {
+		ethernet0 = &EMAC0;
+		serial0 = &UART0;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			model = "PowerPC,apm821xx";
+			reg = <0x00000000>;
+			clock-frequency = <0>; /* Filled in by U-Boot */
+			timebase-frequency = <0>; /* Filled in by U-Boot */
+			i-cache-line-size = <32>;
+			d-cache-line-size = <32>;
+			i-cache-size = <32768>;
+			d-cache-size = <32768>;
+			dcr-controller;
+			dcr-access-method = "native";
+			next-level-cache = <&L2C0>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x00000000 0x00000000>; /* Filled in by U-Boot */
+	};
+
+	UIC0: interrupt-controller0 {
+		compatible = "ibm,uic";
+		interrupt-controller;
+		cell-index = <0>;
+		dcr-reg = <0x0c0 0x009>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+	};
+
+	UIC1: interrupt-controller1 {
+		compatible = "ibm,uic";
+		interrupt-controller;
+		cell-index = <1>;
+		dcr-reg = <0x0d0 0x009>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+		interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
+		interrupt-parent = <&UIC0>;
+	};
+
+	UIC2: interrupt-controller2 {
+		compatible = "ibm,uic";
+		interrupt-controller;
+		cell-index = <2>;
+		dcr-reg = <0x0e0 0x009>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+		interrupts = <0xa 0x4 0xb 0x4>; /* cascade */
+		interrupt-parent = <&UIC0>;
+	};
+
+	UIC3: interrupt-controller3 {
+		compatible = "ibm,uic";
+		interrupt-controller;
+		cell-index = <3>;
+		dcr-reg = <0x0f0 0x009>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		#interrupt-cells = <2>;
+		interrupts = <0x10 0x4 0x11 0x4>; /* cascade */
+		interrupt-parent = <&UIC0>;
+	};
+
+	OCM: ocm@400040000 {
+		compatible = "ibm,ocm";
+		status = "ok";
+		cell-index = <1>;
+		/* configured in U-Boot */
+		reg = <4 0x00040000 0x8000>; /* 32K */
+	};
+
+	SDR0: sdr {
+		compatible = "ibm,sdr-apm821xx";
+		dcr-reg = <0x00e 0x002>;
+	};
+
+	CPR0: cpr {
+		compatible = "ibm,cpr-apm821xx";
+		dcr-reg = <0x00c 0x002>;
+	};
+
+	L2C0: l2c {
+		compatible = "ibm,l2-cache-apm82181", "ibm,l2-cache";
+		dcr-reg = <0x020 0x008
+			   0x030 0x008>;
+		cache-line-size = <32>;
+		cache-size = <262144>;
+		interrupt-parent = <&UIC1>;
+		interrupts = <11 1>;
+	};
+
+	plb {
+		compatible = "ibm,plb4";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges;
+		clock-frequency = <0>; /* Filled in by U-Boot */
+
+		SDRAM0: sdram {
+			compatible = "ibm,sdram-apm821xx";
+			dcr-reg = <0x010 0x002>;
+		};
+
+		MAL0: mcmal {
+			compatible = "ibm,mcmal2";
+			descriptor-memory = "ocm";
+			dcr-reg = <0x180 0x062>;
+			num-tx-chans = <1>;
+			num-rx-chans = <1>;
+			#address-cells = <0>;
+			#size-cells = <0>;
+			interrupt-parent = <&UIC2>;
+			interrupts = < /*TXEOB*/ 0x6 0x4
+				       /*RXEOB*/ 0x7 0x4
+				       /*SERR*/ 0x3 0x4
+				       /*TXDE*/ 0x4 0x4
+				       /*RXDE*/ 0x5 0x4
+				       /*TX0 COAL*/ 0x8 0x2
+				       /*TX1 COAL 0x9 0x2*/
+				       /*RX0 COAL*/ 0xc 0x2
+				       /*RX1 COAL 0xd 0x2*/>;
+		};
+
+		/* SATA DWC devices */
+		SATA0: sata@bffd1000 {
+			compatible = "amcc,sata-460ex";
+			reg = <4 0xbffd1000 0x800	/* SATA0 */
+			       4 0xbffd0800 0x400>;	/* AHBDMA */
+			dma-channel=<0>;
+			interrupt-parent = <&UIC0>;
+			interrupts = <26 4	/* SATA0 */
+				      25 4>;	/* AHBDMA */
+		};
+
+		SATA1: sata@bffd1800 {
+			compatible = "amcc,sata-460ex";
+			reg = <4 0xbffd1800 0x800	/* SATA1 */
+			       4 0xbffd0800 0x400>;	/* AHBDMA */
+			dma-channel=<1>;
+			interrupt-parent = <&UIC0>;
+			interrupts = <27 4	/* SATA1 */
+				      25 4>;	/* AHBDMA */
+		};
+
+		ADMA: adma {
+			compatible = "amcc,apm82181-adma";
+			device_type = "dma";
+			#address-cells = <2>;
+			#size-cells = <1>;
+
+			/*dma-4channel@0{
+				compatible = "amcc,apm82181-dma-4channel";
+				cell-index = <0>;
+				label = "plb_dma0";
+				interrupt-parent = <&UIC0>;
+				interrupts = <0xc 0x4>;
+				pool_size = <0x4000>;
+				dcr-reg = <0x200 0x207>;
+			};*/
+
+			dma-4channel@1 {
+				compatible = "amcc,apm82181-dma-4channel";
+				cell-index = <1>;
+				label = "plb_dma1";
+				interrupt-parent = <&UIC0>;
+				interrupts = <0xd 0x4>;
+				pool_size = <0x4000>;
+				dcr-reg = <0x208 0x20f>;
+			};
+			dma-4channel@2 {
+				compatible = "amcc,apm82181-dma-4channel";
+				cell-index = <2>;
+				label = "plb_dma2";
+				interrupt-parent = <&UIC0>;
+				interrupts = <0xe 0x4>;
+				pool_size = <0x4000>;
+				dcr-reg = <0x210 0x217>;
+			};
+			dma-4channel@3 {
+				compatible = "amcc,apm82181-dma-4channel";
+				cell-index = <3>;
+				label = "plb_dma3";
+				interrupt-parent = <&UIC0>;
+				interrupts = <0xf 0x4>;
+				pool_size = <0x4000>;
+				dcr-reg = <0x218 0x21f>;
+			};
+		};
+
+		POB0: opb {
+			compatible = "ibm,opb";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0xb0000000 0x00000004 0xb0000000 0x50000000>;
+			clock-frequency = <0>; /* Filled in by U-Boot */
+
+			EBC0: ebc {
+				compatible = "ibm,ebc";
+				dcr-reg = <0x012 0x002>;
+				#address-cells = <2>;
+				#size-cells = <1>;
+				clock-frequency = <0>; /* Filled in by U-Boot */
+				/* ranges property is supplied by U-Boot */
+				ranges = < 0x00000003 0x00000000 0xe0000000 0x8000000>;
+				interrupts = <0x6 0x4>;
+				interrupt-parent = <&UIC1>;
+
+				nor_flash@0,0 {
+					compatible = "amd,s29gl512n", "jedec-flash", "cfi-flash";
+					bank-width = <1>;
+					reg = <0x00000000 0x00000000 0x00080000>;
+					#address-cells = <1>;
+					#size-cells = <1>;
+					partition@0 {
+						label = "3genv";
+						reg = <0x00000000 0x20000>;
+					};
+					partition@1 {
+						label = "u-boot";
+						reg = <0x20000 0x60000>;
+					};
+				};
+
+				ndfc@1,0 {
+					compatible = "ibm,ndfc";
+					reg = <0x00000003 0x00000000 0x00002000>;
+					ccr = <0x00001000>;
+					bank-settings = <0x80002222>;
+					#address-cells = <1>;
+					#size-cells = <1>;
+					/* 2Gb Nand Flash */
+					nand {
+						#address-cells = <1>;
+						#size-cells = <1>;
+
+						partition@0 {
+							label = "firmware";
+							reg = <0x00000000 0x00C00000>;
+						};
+						partition@c00000 {
+							label = "environment";
+							reg = <0x00C00000 0x00B00000>;
+						};
+						partition@1700000 {
+							label = "kernel";
+							reg = <0x01700000 0x00E00000>;
+						};
+						partition@2500000 {
+							label = "root";
+							reg = <0x02500000 0x08200000>;
+						};
+						partition@a700000 {
+							label = "device-tree";
+							reg = <0x0A700000 0x00B00000>;
+						};
+						partition@b200000 {
+							label = "config";
+							reg = <0x0B200000 0x00D00000>;
+						};
+						partition@bf00000 {
+							label = "diag";
+							reg = <0x0BF00000 0x00C00000>;
+						};
+						partition@cb00000 {
+							label = "vendor";
+							reg = <0x0CB00000 0x3500000>;
+						};
+					};
+				};
+			};
+
+			UART0: serial@ef600300 {
+				device_type = "serial";
+				compatible = "ns16550";
+				reg = <0xef600300 0x00000008>;
+				virtual-reg = <0xef600300>;
+				clock-frequency = <0>; /* Filled in by U-Boot */
+				current-speed = <0>; /* Filled in by U-Boot */
+				interrupt-parent = <&UIC1>;
+				interrupts = <0x1 0x4>;
+			};
+
+			IIC0: i2c@ef600700 {
+				compatible = "ibm,iic";
+				reg = <0xef600700 0x00000014>;
+				interrupt-parent = <&UIC0>;
+				interrupts = <0x2 0x4>;
+			};
+
+			RGMII0: emac-rgmii@ef601500 {
+				compatible = "ibm,rgmii";
+				reg = <0xef601500 0x00000008>;
+				has-mdio;
+			};
+
+			TAH0: emac-tah@ef601350 {
+				compatible = "ibm,tah";
+				reg = <0xef601350 0x00000030>;
+			};
+
+			EMAC0: ethernet@ef600c00 {
+				device_type = "network";
+				compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
+				interrupt-parent = <&EMAC0>;
+				interrupts = <0x0 0x1>;
+				#interrupt-cells = <1>;
+				#address-cells = <0>;
+				#size-cells = <0>;
+				interrupt-map = </*Status*/ 0x0 &UIC2 0x10 0x4
+						 /*Wake*/ 0x1 &UIC2 0x14 0x4>;
+				reg = <0xef600c00 0x000000c4>;
+				local-mac-address = [000000000000]; /* Filled in by U-Boot */
+				mal-device = <&MAL0>;
+				mal-tx-channel = <0>;
+				mal-rx-channel = <0>;
+				cell-index = <0>;
+				max-frame-size = <9000>;
+				rx-fifo-size = <16384>;
+				tx-fifo-size = <2048>;
+				phy-mode = "rgmii";
+				phy-map = <0x00000000>;
+				rgmii-device = <&RGMII0>;
+				rgmii-channel = <0>;
+				tah-device = <&TAH0>;
+				tah-channel = <0>;
+				has-inverted-stacr-oc;
+				has-new-stacr-staopc;
+			};
+		};
+
+		DMA: plb_dma@400300200 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "amcc,dma";
+			cell-index = <0>;
+			reg = <4 0x00300200 0x200>;
+			dcr-reg = <0x100 0x13f>;
+			interrupt-parent = <&UIC0>;
+			interrupts = <0 1 2 3>;
+			interrupt-map = < /*chan 0*/ 0 &UIC0 12 4
+					  /* chan1*/ 1 &UIC0 13 4
+					  /* chan2*/ 2 &UIC0 14 4
+					  /* chan3*/ 3 &UIC0 15 4>;
+
+
+			dma-4channel@0{
+				compatible = "amcc,dma-4channel";
+				cell-index = <0>;
+				label = "channel0";
+				reg = <0x100 0x107>;
+			};
+			/*
+			dma-4channel@1 {
+				compatible = "amcc,dma-4channel";
+				cell-index = <1>;
+				label = "channel1";
+				reg = <0x108 0x10f>;
+			};
+			dma-4channel@2 {
+				compatible = "amcc,dma-4channel";
+				cell-index = <2>;
+				label = "channel2";
+				reg = <0x110 0x117>;
+			};
+			dma-4channel@3 {
+				compatible = "amcc,dma-4channel";
+				cell-index = <3>;
+				label = "channel3";
+				reg = <0x118 0x11f>;
+			};
+			*/
+		};
+	};
+};
diff --git a/arch/powerpc/configs/44x/apollo_3G_nas_defconfig b/arch/powerpc/configs/44x/apollo_3G_nas_defconfig
new file mode 100644
index 00000000000..c3f97f30943
--- /dev/null
+++ b/arch/powerpc/configs/44x/apollo_3G_nas_defconfig
@@ -0,0 +1,2701 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/powerpc 3.16.0 Kernel Configuration +# +# CONFIG_PPC64 is not set + +# +# Processor support +# +# CONFIG_PPC_BOOK3S_32 is not set +# CONFIG_PPC_85xx is not set +# CONFIG_PPC_8xx is not set +# CONFIG_40x is not set +CONFIG_44x=y +# CONFIG_E200 is not set +CONFIG_PPC_FPU=y +CONFIG_4xx=y +CONFIG_BOOKE=y +CONFIG_PTE_64BIT=y +CONFIG_PHYS_64BIT=y +CONFIG_PPC_MMU_NOHASH=y +# CONFIG_PPC_MM_SLICES is not set +CONFIG_NOT_COHERENT_CACHE=y +# CONFIG_PPC_DOORBELL is not set +CONFIG_CPU_BIG_ENDIAN=y +# CONFIG_CPU_LITTLE_ENDIAN is not set +CONFIG_PPC32=y +CONFIG_32BIT=y +CONFIG_WORD_SIZE=32 +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_MMU=y +# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set +# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set +CONFIG_NR_IRQS=512 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_ILOG2_U32=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_PPC=y +# CONFIG_GENERIC_CSUM is not set +CONFIG_EARLY_PRINTK=y +CONFIG_PANIC_TIMEOUT=0 +CONFIG_GENERIC_NVRAM=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_PPC_OF=y +CONFIG_PPC_UDBG_16550=y +# CONFIG_GENERIC_TBSYNC is not set +CONFIG_AUDIT_ARCH=y +CONFIG_GENERIC_BUG=y +# CONFIG_EPAPR_BOOT is not set +# CONFIG_DEFAULT_UIMAGE is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_PPC_DCR_NATIVE=y +# CONFIG_PPC_DCR_MMIO is not set +CONFIG_PPC_DCR=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_PPC_ADV_DEBUG_REGS=y +CONFIG_PPC_ADV_DEBUG_IACS=4 +CONFIG_PPC_ADV_DEBUG_DACS=2 +CONFIG_PPC_ADV_DEBUG_DVCS=2 +CONFIG_PPC_ADV_DEBUG_DAC_RANGE=y +CONFIG_PPC_EMULATE_SSTEP=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_IRQ_WORK=y + +# +# General setup +# +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_FHANDLE=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_IRQ_DOMAIN=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +CONFIG_GENERIC_TIME_VSYSCALL_OLD=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ is not set +CONFIG_HIGH_RES_TIMERS=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y + +# +# RCU Subsystem +# +CONFIG_TINY_RCU=y +# CONFIG_PREEMPT_RCU is not set +# CONFIG_RCU_STALL_COMMON is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_MEMCG=y 
+CONFIG_MEMCG_SWAP=y +# CONFIG_MEMCG_SWAP_ENABLED is not set +# CONFIG_MEMCG_KMEM is not set +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_EXPERT=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_PCI_QUIRKS=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_SYSTEM_TRUSTED_KEYRING is not set +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +CONFIG_OPROFILE=m +CONFIG_HAVE_OPROFILE=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_UPROBES=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_ATTRS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +# CONFIG_CC_STACKPROTECTOR is not set +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND=y +CONFIG_OLD_SIGACTION=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_CMDLINE_PARSER is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# 
CONFIG_CMDLINE_PARTITION is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_FREEZER=y +CONFIG_PPC4xx_PCI_EXPRESS=y +# CONFIG_PPC4xx_HSTA_MSI is not set +CONFIG_PPC4xx_MSI=y +CONFIG_PPC_MSI_BITMAP=y +# CONFIG_PPC_XICS is not set +# CONFIG_PPC_ICP_NATIVE is not set +# CONFIG_PPC_ICP_HV is not set +# CONFIG_PPC_ICS_RTAS is not set +# CONFIG_GE_FPGA is not set + +# +# Platform support +# +# CONFIG_PPC_CELL is not set +# CONFIG_PPC_CELL_NATIVE is not set +# CONFIG_PQ2ADS is not set +# CONFIG_PPC_47x is not set +# CONFIG_BAMBOO is not set +CONFIG_BLUESTONE=y +# CONFIG_EBONY is not set +# CONFIG_SAM440EP is not set +# CONFIG_SEQUOIA is not set +# CONFIG_TAISHAN is not set +# CONFIG_KATMAI is not set +# CONFIG_RAINIER is not set +# CONFIG_WARP is not set +# CONFIG_ARCHES is not set +CONFIG_CANYONLANDS=y +# CONFIG_GLACIER is not set +# CONFIG_REDWOOD is not set +# CONFIG_EIGER is not set +# CONFIG_YOSEMITE is not set +# CONFIG_ISS4xx is not set +# CONFIG_ICON is not set +# CONFIG_XILINX_VIRTEX440_GENERIC_BOARD is not set +CONFIG_PPC44x_SIMPLE=y +CONFIG_PPC4xx_GPIO=y +CONFIG_PPC4xx_OCM=y +CONFIG_460EX=y +CONFIG_APM821xx=y +# CONFIG_KVM_GUEST is not set +# CONFIG_EPAPR_PARAVIRT is not set +# CONFIG_IPIC is not set +# CONFIG_MPIC is not set +# CONFIG_PPC_EPAPR_HV_PIC is not set +# CONFIG_MPIC_WEIRD is not set +# CONFIG_PPC_I8259 is not set +# CONFIG_PPC_RTAS is not set +# CONFIG_MMIO_NVRAM is not set +# CONFIG_MPIC_U3_HT_IRQS is not set +# CONFIG_PPC_MPC106 is not set +# CONFIG_PPC_970_NAP is not set +# CONFIG_PPC_P7_NAP is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=m +# CONFIG_CPU_FREQ_STAT_DETAILS is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_ONDEMAND=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m + +# +# PowerPC CPU frequency scaling drivers +# + +# +# CPUIdle driver +# + +# +# CPU Idle +# +# CONFIG_CPU_IDLE is not set +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set +# CONFIG_FSL_ULI1575 is not set +# CONFIG_SIMPLE_GPIO is not set + +# +# Kernel options +# +CONFIG_HIGHMEM=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_BINFMT_ELF=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# CONFIG_MATH_EMULATION is not set +# CONFIG_IOMMU_HELPER is not set +# CONFIG_SWIOTLB is not set +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_HAS_WALK_MEMORY=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_KEXEC=y +# CONFIG_CRASH_DUMP is not set +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y 
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_ZONE_DMA_FLAG=1 +CONFIG_BOUNCE=y +CONFIG_NEED_BOUNCE_POOL=y +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_NEED_PER_CPU_KM=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_ZBUD is not set +CONFIG_ZSMALLOC=y +# CONFIG_PGTABLE_MAPPING is not set +CONFIG_STDBINUTILS=y +CONFIG_PPC_4K_PAGES=y +# CONFIG_PPC_16K_PAGES is not set +# CONFIG_PPC_64K_PAGES is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttyS0,9600 console=tty0" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EXTRA_TARGETS="" +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM_RUNTIME=y +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_SECCOMP=y +CONFIG_ISA_DMA_API=y + +# +# Bus options +# +CONFIG_ZONE_DMA=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_GENERIC_ISA_DMA=y +CONFIG_PPC_INDIRECT_PCI=y +CONFIG_PPC4xx_CPM=y +CONFIG_4xx_SOC=y +CONFIG_PPC_PCI_CHOICE=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +# CONFIG_PCIE_ECRC is not set +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCI_MSI=y +# CONFIG_PCI_DEBUG is not set +CONFIG_PCI_REALLOC_ENABLE_AUTO=y +CONFIG_PCI_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set + +# +# PCI host controller drivers +# +# CONFIG_PCCARD is not set +# CONFIG_HOTPLUG_PCI is not set +# CONFIG_HAS_RAPIDIO is not set +# CONFIG_RAPIDIO is not set +# CONFIG_NONSTATIC_KERNEL is not set + +# +# Advanced setup +# +# CONFIG_ADVANCED_OPTIONS is not set + +# +# Default settings for advanced configuration options are used +# +CONFIG_LOWMEM_SIZE=0x30000000 +CONFIG_PAGE_OFFSET=0xc0000000 +CONFIG_KERNEL_START=0xc0000000 +CONFIG_PHYSICAL_START=0x00000000 +CONFIG_TASK_SIZE=0xc0000000 +CONFIG_CONSISTENT_SIZE=0x00200000 +# CONFIG_ARCH_RANDOM is not set +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m 
+CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_LRO=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_NETLABEL is not set +CONFIG_NETWORK_SECMARK=y +# CONFIG_NET_PTP_CLASSIFY is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=m +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=m +CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_QUEUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PROTO_DCCP=m +CONFIG_NF_NAT_PROTO_UDPLITE=m +CONFIG_NF_NAT_PROTO_SCTP=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_META=m +CONFIG_NFT_CT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_HASH=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_NAT=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NETFILTER_XTABLES=m + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m 
+CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_CONNTRACK_IPV4=m 
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y +CONFIG_NF_TABLES_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NF_TABLES_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_NF_NAT_IPV4=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_REJECT_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_NF_NAT_IPV6=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m + +# +# DECnet: Netfilter Configuration +# +CONFIG_DECNET_NF_GRABULATOR=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_ULOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# +# CONFIG_IP_DCCP_CCID2_DEBUG is not set +CONFIG_IP_DCCP_CCID3=y +# CONFIG_IP_DCCP_CCID3_DEBUG is not set +CONFIG_IP_DCCP_TFRC_LIB=y + +# +# DCCP Kernel Hacking +# +# CONFIG_IP_DCCP_DEBUG is not set +CONFIG_NET_DCCPPROBE=m +CONFIG_IP_SCTP=m +CONFIG_NET_SCTPPROBE=m +# CONFIG_SCTP_DBG_OBJCNT is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_RDS=m +CONFIG_RDS_TCP=m +# CONFIG_RDS_DEBUG is not set +CONFIG_TIPC=m +CONFIG_TIPC_PORTS=8191 +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_DECNET=m +CONFIG_DECNET_ROUTER=y 
+CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_IPX=m +CONFIG_IPX_INTERN=y +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +# CONFIG_X25 is not set +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_IEEE802154=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_6LOWPAN_IPHC=m +# CONFIG_MAC802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_CLS_IND=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +# CONFIG_BATMAN_ADV_MCAST is not set +# CONFIG_BATMAN_ADV_DEBUG is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=y +CONFIG_OPENVSWITCH_VXLAN=y +# CONFIG_VSOCKETS is not set +CONFIG_NETLINK_MMAP=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +# CONFIG_HSR is not set +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NET_TCPPROBE is not set +CONFIG_NET_DROP_MONITOR=m +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_AF_RXRPC=m +# CONFIG_AF_RXRPC_DEBUG is not set +CONFIG_RXKAD=m +CONFIG_FIB_RULES=y +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +# CONFIG_NFC is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +# CONFIG_DEVTMPFS_MOUNT is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +# CONFIG_DMA_SHARED_BUFFER is not set + +# +# Bus devices +# +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_MTD is not set +CONFIG_DTC=y +CONFIG_OF=y + +# +# Device Tree and Open Firmware support +# +# 
CONFIG_OF_SELFTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=m +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_ZRAM is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_NVME is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_XILINX_SYSACE is not set +# CONFIG_BLK_DEV_HD is not set +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_BMP085_I2C is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_SRAM is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# +# CONFIG_ECHO is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_TGT=m +CONFIG_SCSI_NETLINK=y +# CONFIG_SCSI_PROC_FS is not set + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_FC_TGT_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y +# CONFIG_SCSI_LOWLEVEL is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +# CONFIG_SATA_AHCI is not set +# CONFIG_SATA_AHCI_PLATFORM is not set +# 
CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_DWC is not set +CONFIG_SATA_DWC_PMP=y +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=m +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_MQ=m +CONFIG_DM_CACHE_CLEANER=m +# CONFIG_DM_ERA is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_SWITCH=m +# CONFIG_TARGET_CORE is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# CONFIG_I2O is not set +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +CONFIG_DUMMY=m +CONFIG_EQUALIZER=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m 
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_VXLAN=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_NLMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +# CONFIG_NET_DSA_MV88E6XXX is not set +# CONFIG_NET_DSA_MV88E6060 is not set +# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set +# CONFIG_NET_DSA_MV88E6131 is not set +# CONFIG_NET_DSA_MV88E6123_61_65 is not set +CONFIG_ETHERNET=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_CALXEDA_XGMAC is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +CONFIG_NET_VENDOR_IBM=y +CONFIG_IBM_EMAC=y +CONFIG_IBM_EMAC_RXB=128 +CONFIG_IBM_EMAC_TXB=64 +CONFIG_IBM_EMAC_POLL_WEIGHT=32 +CONFIG_IBM_EMAC_RX_COPY_THRESHOLD=256 +CONFIG_IBM_EMAC_RX_SKB_HEADROOM=0 +# CONFIG_IBM_EMAC_DEBUG is not set +CONFIG_IBM_EMAC_ZMII=y +CONFIG_IBM_EMAC_RGMII=y +CONFIG_IBM_EMAC_TAH=y +CONFIG_IBM_EMAC_EMAC4=y +# CONFIG_IBM_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_EMAC_MAL_COMMON_ERR is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_IP1000 is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_ETHOC is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_SH_ETH is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_SFC is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +CONFIG_PHYLIB=m + +# +# MII PHY device drivers +# +CONFIG_AT803X_PHY=m +CONFIG_AMD_PHY=m +# CONFIG_AMD_XGBE_PHY is not set +CONFIG_MARVELL_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_LXT_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM7XXX_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_STE10XP=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MICREL_PHY=m +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set 
+# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_IEEE802154_DRIVERS is not set +# CONFIG_VMXNET3 is not set +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_PPC_EPAPR_HV_BYTECHAN is not set +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set +CONFIG_SERIAL_8250_FSL=y +# CONFIG_SERIAL_8250_DW is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MFD_HSU is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_OF_PLATFORM is not set +# CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_UDBG is not set +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=m +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_PPC4XX=m +CONFIG_NVRAM=y +# CONFIG_GEN_RTC is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_DEVPORT=y +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# 
CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_IBM_IIC=m +# CONFIG_I2C_MPC is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_RK3X is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +# CONFIG_PTP_1588_CLOCK is not set + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_DEVRES=y +CONFIG_OF_GPIO=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO drivers: +# +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_SCH311X is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_VX855 is not set +# CONFIG_GPIO_GRGPIO is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set + +# +# PCI GPIO expanders: +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MCP23S08 is not set + +# +# AC97 GPIO expanders: +# + +# +# LPC GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +# CONFIG_GPIO_BCM_KONA is not set + +# +# USB GPIO expanders: +# +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_POWER_AVS is not set +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not 
set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_HTU21 is not set +# CONFIG_SENSORS_MCP3021 is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y + +# +# Broadcom specific AMBA +# +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_AXP20X is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RTSX_PCI is not set +# CONFIG_MFD_RC5T583 
is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TIMBERDALE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_AGP is not set +# CONFIG_VGA_ARB is not set + +# +# Direct Rendering Manager +# +# CONFIG_DRM is not set + +# +# Frame buffer Devices +# +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set +# CONFIG_VGASTATE is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +# CONFIG_SOUND is not set + +# +# HID support +# +# CONFIG_HID is not set + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SUPPORT is not set +# CONFIG_UWB is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +# CONFIG_EDAC is not set +# CONFIG_RTC_CLASS is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +# CONFIG_DW_DMAC_CORE is not set +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set +CONFIG_AMCC_PPC460EX_460GT_4CHAN_DMA=y +CONFIG_APM82181_ADMA=y +CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL=y +# CONFIG_FSL_EDMA is not set +CONFIG_DMA_ENGINE=y +CONFIG_DMA_OF=y + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +# CONFIG_VIRT_DRIVERS is not set + +# +# Virtio drivers +# +# CONFIG_VIRTIO_PCI is not set +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_STAGING is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Hardware Spinlock drivers +# +# CONFIG_SH_TIMER_CMT is not set +# CONFIG_SH_TIMER_MTU2 is not set +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_EM_TIMER_STI is not set +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_SUPPORT=y +CONFIG_OF_IOMMU=y + +# +# Remoteproc drivers +# +# CONFIG_STE_MODEM_RPROC is not set + +# +# Rpmsg drivers +# +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set +CONFIG_IRQCHIP=y +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_SAMSUNG_USB2 is not set +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# 
File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_DEFAULTS_TO_ORDERED=y +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +# CONFIG_EXT4_FS is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +# CONFIG_QFMT_V1 is not set +# CONFIG_QFMT_V2 is not set +CONFIG_QUOTACTL=y +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# Caches +# +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="utf8" +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_CONFIGFS_FS=m +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V2=y +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=y +CONFIG_PNFS_BLOCK=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_ROOT_NFS=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +# CONFIG_NFSD is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SUNRPC_DEBUG is not set +# CONFIG_CEPH_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m 
+CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +# CONFIG_DLM is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_PERCPU_RWSEM=y +CONFIG_CRC_CCITT=m +CONFIG_CRC16=m +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +# CONFIG_CRC8 is not set +# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_XZ_DEC=y +# CONFIG_XZ_DEC_X86 is not set +CONFIG_XZ_DEC_POWERPC=y +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DQL=y +CONFIG_NLATTR=y +CONFIG_GENERIC_ATOMIC64=y +CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y +CONFIG_AVERAGE=y +CONFIG_CORDIC=m +# CONFIG_DDR is not set +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +CONFIG_UNUSED_SYMBOLS=y +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x01b6 +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_HIGHMEM is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# 
CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +CONFIG_TIMER_STATS=y + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_TORTURE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_FTRACE_SYSCALLS is not set +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENT=y +CONFIG_UPROBE_EVENT=y +CONFIG_PROBE_EVENTS=y +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set + +# +# Runtime Testing +# +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_TEST_MODULE is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_PPC_DISABLE_WERROR=y +CONFIG_PRINT_STACK_DEPTH=64 +# CONFIG_PPC_EMULATED_STATS is not set +# CONFIG_CODE_PATCHING_SELFTEST is not set +# CONFIG_FTR_FIXUP_SELFTEST is not set +# CONFIG_MSI_BITMAP_SELFTEST is not set +CONFIG_XMON=y +# CONFIG_XMON_DEFAULT is not set +CONFIG_XMON_DISASSEMBLY=y +CONFIG_DEBUGGER=y +# CONFIG_BDI_SWITCH is not set +# CONFIG_PPC_EARLY_DEBUG is not set +CONFIG_STRICT_DEVMEM=y + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +CONFIG_KEYS_DEBUG_PROC_KEYS=y +# 
CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=32768 +CONFIG_SECURITY_SELINUX=y +# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set +# CONFIG_SECURITY_SELINUX_DISABLE is not set +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set +# CONFIG_SECURITY_SMACK is not set +CONFIG_SECURITY_TOMOYO=y +CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048 +CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024 +# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set +CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init" +CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init" +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_YAMA_STACKED=y +# CONFIG_IMA is not set +# CONFIG_EVM is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set +# CONFIG_DEFAULT_SECURITY_YAMA is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=m +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=m +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=m +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_SEQIV=m + +# +# Block modes +# +CONFIG_CRYPTO_CBC=m +CONFIG_CRYPTO_CTR=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_PPC=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_ZLIB=m +CONFIG_CRYPTO_LZO=m +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set + +# +# Random Number Generation +# 
+CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_USER_API=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_PPC4XX is not set +# CONFIG_ASYMMETRIC_KEY_TYPE is not set +CONFIG_PPC_LIB_RHEAP=y +# CONFIG_VIRTUALIZATION is not set diff --git a/arch/powerpc/include/asm/apm82181-adma.h b/arch/powerpc/include/asm/apm82181-adma.h new file mode 100644 index 00000000000..8d36b517fbf --- /dev/null +++ b/arch/powerpc/include/asm/apm82181-adma.h @@ -0,0 +1,311 @@ +/* + * 2009-2010 (C) Applied Micro Circuits Corporation. + * + * Author: Tai Tri Nguyen<ttnguyen@appliedmicro.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of + * any kind, whether express or implied. + */ + +#ifndef APM82181_ADMA_H +#define APM82181_ADMA_H + + +#include <linux/types.h> + + +#define to_apm82181_adma_chan(chan) container_of(chan,apm82181_ch_t,common) +#define to_apm82181_adma_device(dev) container_of(dev,apm82181_dev_t,common) +#define tx_to_apm82181_adma_slot(tx) container_of(tx,apm82181_desc_t,async_tx) + +#define APM82181_DMA_PROC_ROOT "driver/apm82181_adma" + +/* Number of operands supported in the h/w */ +#define XOR_MAX_OPS 16 +/* this is the XOR_CBBCR width */ +#define APM82181_ADMA_XOR_MAX_BYTE_COUNT (1 << 31) +#define APM82181_ADMA_DMA_MAX_BYTE_COUNT 1024 * 1024 +#define MAX_APM82181_DMA_CHANNELS 5 +#define APM82181_ADMA_THRESHOLD 1 + +#define APM82181_PDMA0_ID 0 +#define APM82181_PDMA1_ID 1 +#define APM82181_PDMA2_ID 2 +#define APM82181_PDMA3_ID 3 +#define APM82181_XOR_ID 4 + +/* DMA 0/1/2/3 registers */ +#define DCR_DMAx_BASE(x) (0x200 + x*0x8) /* DMA DCR base */ +#define DCR_DMA2P40_CRx(x) (DCR_DMAx_BASE(x) + 0x0) /* DMA Channel Control */ +#define DMA_CR_CE (1 << 31) +#define DMA_CR_CIE (1 << 30) +#define DMA_CR_PL (1 << 28) +#define DMA_CR_PW_128 0x08000000 +#define DMA_CR_DAI 0x01000000 +#define DMA_CR_SAI 0x00800000 +#define DMA_CR_BEN 0x00400000 +#define DMA_CR_TM_S_MM 0x00300000 +#define DMA_CR_ETD 0x00000100 +#define DMA_CR_TCE 0x00000080 +#define DMA_CR_CP(x) (x<<5)& 0x00000060 +#define DMA_CR_DEC (1 << 2) +#define DMA_CR_SL (1 << 1) +#define DCR_DMA2P40_CTCx(x) (DCR_DMAx_BASE(x) + 0x1) /* DMA Count 0 */ +#define DMA_CTC_ETIE (1 << 28) +#define DMA_CTC_EIE (1 << 27) +#define DMA_CTC_PCE (1 << 20) +#define DMA_CTC_TC_MASK 0x000fffff +#define DCR_DMA2P40_SAHx(x) (DCR_DMAx_BASE(x) + 0x2) /* DMA Src Addr High 0 */ +#define DCR_DMA2P40_SALx(x) (DCR_DMAx_BASE(x) + 0x3) /* DMA Src Addr Low 0 */ +#define DCR_DMA2P40_DAHx(x) (DCR_DMAx_BASE(x) + 0x4) /* DMA Dest Addr High 0 */ +#define DCR_DMA2P40_DALx(x) (DCR_DMAx_BASE(x) + 0x5) /* DMA Dest Addr Low 0 */ +#define DCR_DMA2P40_SGHx(x) (DCR_DMAx_BASE(x) + 0x6) /* DMA SG Desc Addr High 0 */ +#define DCR_DMA2P40_SGLx(x) (DCR_DMAx_BASE(x) + 0x7) /* DMA SG Desc Addr Low 0 */ +/* DMA Status Register */ +#define DCR_DMA2P40_SR 0x220 +#define DMA_SR_CS(x) (1 << (31 -x)) +#define DMA_SR_TS(x) (1 << (27 -x)) +#define DMA_SR_RI(x) (1 << (23 -x)) +#define DMA_SR_IR(x) (1 << (19 -x)) +#define DMA_SR_ER(x) (1 << (15 -x)) +#define DMA_SR_CB(x) (1 << (11 -x)) +#define DMA_SR_SG(x) (1 << (7 -x)) +/* S/G registers */ +#define DCR_DMA2P40_SGC 0x223 +#define DMA_SGC_SSG(x) ( 1 << (31 - x)) +#define DMA_SGC_SGL(x,y) ( y << (27 - x)) /* x: channel; y: 0 PLB, 1 OPB*/ +#define DMA_SGC_EM(x) ( 1 << (15 - x)) +#define DMA_SGC_EM_ALL 0x0000F000 + +/* + * XOR Command Block Control Register bits + */ +#define XOR_CBCR_LNK_BIT (1<<31) /* 
link present */ +#define XOR_CBCR_TGT_BIT (1<<30) /* target present */ +#define XOR_CBCR_CBCE_BIT (1<<29) /* command block compete enable */ +#define XOR_CBCR_RNZE_BIT (1<<28) /* result not zero enable */ +#define XOR_CBCR_XNOR_BIT (1<<15) /* XOR/XNOR */ +#define XOR_CDCR_OAC_MSK (0x7F) /* operand address count */ + +/* + * XORCore Status Register bits + */ +#define XOR_SR_XCP_BIT (1<<31) /* core processing */ +#define XOR_SR_ICB_BIT (1<<17) /* invalid CB */ +#define XOR_SR_IC_BIT (1<<16) /* invalid command */ +#define XOR_SR_IPE_BIT (1<<15) /* internal parity error */ +#define XOR_SR_RNZ_BIT (1<<2) /* result not Zero */ +#define XOR_SR_CBC_BIT (1<<1) /* CB complete */ +#define XOR_SR_CBLC_BIT (1<<0) /* CB list complete */ + +/* + * XORCore Control Set and Reset Register bits + */ +#define XOR_CRSR_XASR_BIT (1<<31) /* soft reset */ +#define XOR_CRSR_XAE_BIT (1<<30) /* enable */ +#define XOR_CRSR_RCBE_BIT (1<<29) /* refetch CB enable */ +#define XOR_CRSR_PAUS_BIT (1<<28) /* pause */ +#define XOR_CRSR_64BA_BIT (1<<27) /* 64/32 CB format */ +#define XOR_CRSR_CLP_BIT (1<<25) /* continue list processing */ + +/* + * XORCore Interrupt Enable Register + */ +#define XOR_IE_ICBIE_BIT (1<<17) /* Invalid Command Block Interrupt Enable */ +#define XOR_IE_ICIE_BIT (1<<16) /* Invalid Command Interrupt Enable */ +#define XOR_IE_RPTIE_BIT (1<<14) /* Read PLB Timeout Error Interrupt Enable */ +#define XOR_IE_CBCIE_BIT (1<<1) /* CB complete interrupt enable */ +#define XOR_IE_CBLCI_BIT (1<<0) /* CB list complete interrupt enable */ + +typedef struct apm82181_plb_dma4_device { + struct resource reg; /* Resource for register */ + void __iomem *reg_base; + struct platform_device *ofdev; + struct device *dev; +} apm82181_plb_dma_t; + +/** + * struct apm82181_dma_device - internal representation of an DMA device + * @id: HW DMA Device selector + * @ofdev: OF device + * @dcr_base: dcr base of HW PLB DMA channels + * @reg_base: base of ADMA XOR channel + * @dma_desc_pool: base of DMA descriptor region (DMA address) + * @dma_desc_pool_virt: base of DMA descriptor region (CPU address) + * @pool_size: memory pool size for the channel device + * @common: embedded struct dma_device + * @cap_mask: capabilities of ADMA channels + */ +typedef struct apm82181_plb_dma_device { + int id; + struct platform_device *ofdev; + u32 dcr_base; + void __iomem *xor_base; + struct device *dev; + struct dma_device common; + struct apm82181_plb_dma4_device *pdma; + void *dma_desc_pool_virt; + u32 pool_size; + dma_addr_t dma_desc_pool; + dma_cap_mask_t cap_mask; +} apm82181_dev_t; + +/** + * struct apm82181_dma_chan - internal representation of an ADMA channel + * @lock: serializes enqueue/dequeue operations to the slot pool + * @device: parent device + * @chain: device chain view of the descriptors + * @common: common dmaengine channel object members + * @all_slots: complete domain of slots usable by the channel + * @reg: Resource for register + * @pending: allows batching of hardware operations + * @completed_cookie: identifier for the most recently completed operation + * @slots_allocated: records the actual size of the descriptor slot pool + * @hw_chain_inited: h/w descriptor chain initialization flag + * @irq_tasklet: bottom half where apm82181_adma_slot_cleanup runs + * @needs_unmap: if buffers should not be unmapped upon final processing + */ +typedef struct apm82181_plb_dma_chan { + spinlock_t lock; + struct apm82181_plb_dma_device *device; + struct timer_list cleanup_watchdog; + struct list_head chain; + struct dma_chan common; + 
struct list_head all_slots; + struct apm82181_adma_plb_desc_slot *last_used; + int pending; + dma_cookie_t completed_cookie; + int slots_allocated; + int hw_chain_inited; + struct tasklet_struct irq_tasklet; + u8 needs_unmap; + phys_addr_t current_cdb_addr; +} apm82181_ch_t; + +typedef struct apm82181_adma_plb_desc_slot { + dma_addr_t phys; + struct apm82181_adma_plb_desc_slot *group_head; + struct apm82181_adma_plb_desc_slot *hw_next; + struct dma_async_tx_descriptor async_tx; + struct list_head slot_node; + struct list_head chain_node; + struct list_head group_list; + unsigned int unmap_len; + void *hw_desc; + u16 stride; + u16 idx; + u16 slot_cnt; + u8 src_cnt; + u8 dst_cnt; + u8 slots_per_op; + u8 descs_per_op; + unsigned long flags; + unsigned long reverse_flags[8]; +#define APM82181_DESC_INT 0 /* generate interrupt on complete */ +#define APM82181_DESC_FENCE 1 /* Other tx will use its result */ + /* This tx needs to be polled to complete */ + +}apm82181_desc_t; + +typedef struct { + u32 ce:1; + u32 cie:1; + u32 td:1; + u32 pl:1; + u32 pw:3; + u32 dai:1; + u32 sai:1; + u32 ben:1; + u32 tm:2; + u32 psc:2; + u32 pwc:6; + u32 phc:3; + u32 etd:1; + u32 tce:1; + u32 cp:2; + u32 pf:2; + u32 dec:1; + u32 sl:1; + u32 reserved:1; +} __attribute__((packed)) dma_cdb_ctrl_t; + +typedef struct { + u32 link:1; + u32 sgl:1; + u32 tcie:1; + u32 etie:1; + u32 eie:1; + u32 sid:3; + u32 bten:1; + u32 bsiz:2; + u32 pce:1; + u32 tc:20; +} __attribute__((packed)) dma_cdb_count_t; +/* scatter/gather descriptor struct */ +typedef struct dma_cdb { + dma_cdb_ctrl_t ctrl; + dma_cdb_count_t cnt; + u32 src_hi; + u32 src_lo; + u32 dest_hi; + u32 dest_lo; + u32 sg_hi; + u32 sg_lo; +}dma_cdb_t; + +typedef struct { + uint32_t control; + phys_addr_t src_addr; + phys_addr_t dst_addr; + uint32_t control_count; + uint32_t next; +} ppc_sgl_t; + +/* + * XOR Accelerator engine Command Block Type + */ +typedef struct { + /* + * Basic 64-bit format XOR CB + */ + u32 cbc; /* control */ + u32 cbbc; /* byte count */ + u32 cbs; /* status */ + u8 pad0[4]; /* reserved */ + u32 cbtah; /* target address high */ + u32 cbtal; /* target address low */ + u32 cblah; /* link address high */ + u32 cblal; /* link address low */ + struct { + u32 h; + u32 l; + } __attribute__ ((packed)) ops [16]; +} __attribute__ ((packed)) xor_cb_t; + +/* + * XOR hardware registers + */ +typedef struct { + u32 op_ar[16][2]; /* operand address[0]-high,[1]-low registers */ + u8 pad0[352]; /* reserved */ + u32 cbcr; /* CB control register */ + u32 cbbcr; /* CB byte count register */ + u32 cbsr; /* CB status register */ + u8 pad1[4]; /* reserved */ + u32 cbtahr; /* operand target address high register */ + u32 cbtalr; /* operand target address low register */ + u32 cblahr; /* CB link address high register */ + u32 cblalr; /* CB link address low register */ + u32 crsr; /* control set register */ + u32 crrr; /* control reset register */ + u32 ccbahr; /* current CB address high register */ + u32 ccbalr; /* current CB address low register */ + u32 plbr; /* PLB configuration register */ + u32 ier; /* interrupt enable register */ + u32 pecr; /* parity error count register */ + u32 sr; /* status register */ + u32 revidr; /* revision ID register */ +} xor_regs_t; + +#endif diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 7671dbac601..2664da32d9d 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -278,6 +278,15 @@ config SATA_DWC_VDEBUG help This option enables the taskfile dumping and NCQ debugging. 
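Editor's aside, not part of the patch: the DCR_DMAx_BASE()/DCR_DMA2P40_*x() macros in the apm82181-adma.h header above allocate eight consecutive DCRs per PLB DMA channel starting at 0x200, so channel 3's last register (the scatter/gather low word) lands at DCR 0x21f, directly below the shared status register DCR_DMA2P40_SR at 0x220. A minimal user-space sketch of that arithmetic, reusing the macro definitions from the header (argument parenthesized here for safety):

    #include <stdio.h>

    /* Copied from apm82181-adma.h; only the address arithmetic matters here. */
    #define DCR_DMAx_BASE(x)    (0x200 + (x) * 0x8)       /* per-channel DCR base */
    #define DCR_DMA2P40_CRx(x)  (DCR_DMAx_BASE(x) + 0x0)  /* channel control      */
    #define DCR_DMA2P40_SALx(x) (DCR_DMAx_BASE(x) + 0x3)  /* source addr low      */
    #define DCR_DMA2P40_SGLx(x) (DCR_DMAx_BASE(x) + 0x7)  /* s/g desc addr low    */

    int main(void)
    {
            /* Prints 0x200/0x203/0x207 for channel 0 up through
             * 0x218/0x21b/0x21f for channel 3. */
            for (int ch = 0; ch < 4; ch++)
                    printf("ch%d: CR=0x%03x SAL=0x%03x SGL=0x%03x\n", ch,
                           DCR_DMA2P40_CRx(ch), DCR_DMA2P40_SALx(ch),
                           DCR_DMA2P40_SGLx(ch));
            return 0;
    }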
+config SATA_DWC_PMP + tristate "DesignWare Cores SATA with PMP support" + depends on 460EX + help + This option enables support for the on-chip SATA controller of the + AppliedMicro processor 460EX with PMP support. + + If unsure, say N. + config SATA_HIGHBANK tristate "Calxeda Highbank SATA support" depends on ARCH_HIGHBANK || COMPILE_TEST diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 5a02aeecef5..7e7a77de757 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_SATA_FSL) += sata_fsl.o obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o obj-$(CONFIG_SATA_SIL24) += sata_sil24.o obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o +obj-$(CONFIG_SATA_DWC_PMP) += sata_dwc_pmp.o obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o diff --git a/drivers/ata/sata_dwc_pmp.c b/drivers/ata/sata_dwc_pmp.c new file mode 100644 index 00000000000..3ff190af8d2 --- /dev/null +++ b/drivers/ata/sata_dwc_pmp.c @@ -0,0 +1,3041 @@ +/* + * drivers/ata/sata_dwc.c + * + * Synopsys DesignWare Cores (DWC) SATA host driver + * + * Author: Mark Miesfeld <mmiesfeld@amcc.com> + * + * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de> + * Copyright 2008 DENX Software Engineering + * + * Based on versions provided by AMCC and Synopsys which are: + * Copyright 2006 Applied Micro Circuits Corporation + * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/libata.h> +#include <linux/rtc.h> + +#include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> + + +#ifdef CONFIG_SATA_DWC_DEBUG +#define dwc_dev_dbg(dev, format, arg...) \ + ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; }) +#define dwc_port_dbg(ap, format, arg...) \ + ata_port_printk(ap, KERN_INFO, format, ##arg) +#define dwc_link_dbg(link, format, arg...) \ + ata_link_printk(link, KERN_INFO, format, ##arg) +#else +#define dwc_dev_dbg(dev, format, arg...) \ + ({ 0; }) +#define dwc_port_dbg(ap, format, arg...) \ + ({ 0; }) +#define dwc_link_dbg(link, format, arg...) \ + ({ 0; }) +#endif + +#ifdef CONFIG_SATA_DWC_VDEBUG +#define DEBUG_NCQ +#define dwc_dev_vdbg(dev, format, arg...) \ + ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; }) +#define dwc_port_vdbg(ap, format, arg...) \ + ata_port_printk(ap, KERN_INFO, format, ##arg) +#define dwc_link_vdbg(link, format, arg...) \ + ata_link_printk(link, KERN_INFO, format, ##arg) +#else +#define dwc_dev_vdbg(dev, format, arg...) \ + ({ 0; }) +#define dwc_port_vdbg(ap, format, arg...) \ + ({ 0; }) +#define dwc_link_vdbg(link, format, arg...) \ + ({ 0; }) +#endif + +#define dwc_dev_info(dev, format, arg...) \ + ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; }) +#define dwc_port_info(ap, format, arg...) \ + ata_port_printk(ap, KERN_INFO, format, ##arg) +#define dwc_link_info(link, format, arg...) 
\ + ata_link_printk(link, KERN_INFO, format, ##arg) + +/* These two are defined in "libata.h" */ +#undef DRV_NAME +#undef DRV_VERSION +#define DRV_NAME "sata-dwc" +#define DRV_VERSION "2.0" + +/* Port Multiplier discovery Signature */ +#define PSCR_SCONTROL_DET_ENABLE 0x00000001 +#define PSCR_SSTATUS_DET_PRESENT 0x00000001 +#define PSCR_SERROR_DIAG_X 0x04000000 + +/* Port multiplier port entry in SCONTROL register */ +#define SCONTROL_PMP_MASK 0x000f0000 +#define PMP_TO_SCONTROL(p) ((p << 16) & 0x000f0000) +#define SCONTROL_TO_PMP(p) (((p) & 0x000f0000) >> 16) + + +/* SATA DMA driver Globals */ +#if defined(CONFIG_APM821xx) +#define DMA_NUM_CHANS 2 +#else +#define DMA_NUM_CHANS 1 +#endif + +#define DMA_NUM_CHAN_REGS 8 + +/* SATA DMA Register definitions */ +#if defined(CONFIG_APM821xx) +#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */ +#else +#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */ +#endif + +#if defined(CONFIG_APOLLO3G) +extern void signal_hdd_led(int, int); +#endif +struct dmareg { + u32 low; /* Low bits 0-31 */ + u32 high; /* High bits 32-63 */ +}; + +/* DMA Per Channel registers */ + +struct dma_chan_regs { + struct dmareg sar; /* Source Address */ + struct dmareg dar; /* Destination address */ + struct dmareg llp; /* Linked List Pointer */ + struct dmareg ctl; /* Control */ + struct dmareg sstat; /* Source Status not implemented in core */ + struct dmareg dstat; /* Destination Status not implemented in core */ + struct dmareg sstatar; /* Source Status Address not impl in core */ + struct dmareg dstatar; /* Destination Status Address not implemented */ + struct dmareg cfg; /* Config */ + struct dmareg sgr; /* Source Gather */ + struct dmareg dsr; /* Destination Scatter */ +}; + +/* Generic Interrupt Registers */ +struct dma_interrupt_regs { + struct dmareg tfr; /* Transfer Interrupt */ + struct dmareg block; /* Block Interrupt */ + struct dmareg srctran; /* Source Transfer Interrupt */ + struct dmareg dsttran; /* Dest Transfer Interrupt */ + struct dmareg error; /* Error */ +}; + +struct ahb_dma_regs { + struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS]; + struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */ + struct dma_interrupt_regs interrupt_status; /* Interrupt Status */ + struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */ + struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */ + struct dmareg statusInt; /* Interrupt combined */ + struct dmareg rq_srcreg; /* Src Trans Req */ + struct dmareg rq_dstreg; /* Dst Trans Req */ + struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req */ + struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req */ + struct dmareg rq_lst_srcreg; /* Last Src Trans Req */ + struct dmareg rq_lst_dstreg; /* Last Dst Trans Req */ + struct dmareg dma_cfg; /* DMA Config */ + struct dmareg dma_chan_en; /* DMA Channel Enable */ + struct dmareg dma_id; /* DMA ID */ + struct dmareg dma_test; /* DMA Test */ + struct dmareg res1; /* reserved */ + struct dmareg res2; /* reserved */ + + /* DMA Comp Params + * Param 6 = dma_param[0], Param 5 = dma_param[1], + * Param 4 = dma_param[2] ... 
+ */ + struct dmareg dma_params[6]; +}; + +/* Data structure for linked list item */ +struct lli { + u32 sar; /* Source Address */ + u32 dar; /* Destination address */ + u32 llp; /* Linked List Pointer */ + struct dmareg ctl; /* Control */ +#if defined(CONFIG_APM821xx) + u32 dstat; /* Source status is not supported */ +#else + struct dmareg dstat; /* Destination Status */ +#endif +}; + +#define SATA_DWC_DMAC_LLI_SZ (sizeof(struct lli)) +#define SATA_DWC_DMAC_LLI_NUM 256 +#define SATA_DWC_DMAC_TWIDTH_BYTES 4 +#define SATA_DWC_DMAC_LLI_TBL_SZ \ + (SATA_DWC_DMAC_LLI_SZ * SATA_DWC_DMAC_LLI_NUM) +#if defined(CONFIG_APM821xx) +#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \ + (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES) +#else +#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \ + (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES) +#endif +/* DMA Register Operation Bits */ +#define DMA_EN 0x00000001 /* Enable AHB DMA */ +#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */ +#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \ + ((0x000000001 << (ch)) << 8)) +#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8)) + +/* Channel Control Register */ +#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */ +#define DMA_CTL_LLP_SRCEN 0x10000000 /* Blk chain enable Src */ +#define DMA_CTL_LLP_DSTEN 0x08000000 /* Blk chain enable Dst */ +/* + * This define is used to set block chaining disabled in the control low + * register. It is already in little endian format so it can be &'d dirctly. + * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN)) + */ +#define DMA_CTL_LLP_DISABLE_LE32 0xffffffe7 +#define DMA_CTL_SMS(num) ((num & 0x3) << 25) /*Src Master Select*/ +#define DMA_CTL_DMS(num) ((num & 0x3) << 23) /*Dst Master Select*/ +#define DMA_CTL_TTFC(type) ((type & 0x7) << 20) /*Type&Flow cntr*/ +#define DMA_CTL_TTFC_P2M_DMAC 0x00000002 /*Per mem,DMAC cntr*/ +#define DMA_CTL_TTFC_M2P_PER 0x00000003 /*Mem per,peri cntr*/ +#define DMA_CTL_SRC_MSIZE(size) ((size & 0x7) << 14) /*Src Burst Len*/ +#define DMA_CTL_DST_MSIZE(size) ((size & 0x7) << 11) /*Dst Burst Len*/ +#define DMA_CTL_SINC_INC 0x00000000 /*Src addr incr*/ +#define DMA_CTL_SINC_DEC 0x00000200 +#define DMA_CTL_SINC_NOCHANGE 0x00000400 +#define DMA_CTL_DINC_INC 0x00000000 /*Dst addr incr*/ +#define DMA_CTL_DINC_DEC 0x00000080 +#define DMA_CTL_DINC_NOCHANGE 0x00000100 +#define DMA_CTL_SRC_TRWID(size) ((size & 0x7) << 4) /*Src Trnsfr Width*/ +#define DMA_CTL_DST_TRWID(size) ((size & 0x7) << 1) /*Dst Trnsfr Width*/ +#define DMA_CTL_INT_EN 0x00000001 /*Interrupt Enable*/ + +/* Channel Configuration Register high bits */ +#define DMA_CFG_FCMOD_REQ 0x00000001 /*Flow cntrl req*/ +#define DMA_CFG_PROTCTL (0x00000003 << 2) /*Protection cntrl*/ + +/* Channel Configuration Register low bits */ +#define DMA_CFG_RELD_DST 0x80000000 /*Reload Dst/Src Addr*/ +#define DMA_CFG_RELD_SRC 0x40000000 +#define DMA_CFG_HS_SELSRC 0x00000800 /*SW hndshk Src/Dst*/ +#define DMA_CFG_HS_SELDST 0x00000400 +#define DMA_CFG_FIFOEMPTY (0x00000001 << 9) /*FIFO Empty bit*/ + +/* Assign hardware handshaking interface (x) to dst / sre peripheral */ +#define DMA_CFG_HW_HS_DEST(int_num) ((int_num & 0xF) << 11) +#define DMA_CFG_HW_HS_SRC(int_num) ((int_num & 0xF) << 7) + +/* Channel Linked List Pointer Register */ +#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master)) +#define DMA_LLP_AHBMASTER1 0 /* List Master Select */ +#define DMA_LLP_AHBMASTER2 1 + +#define SATA_DWC_MAX_PORTS 1 + +#define SATA_DWC_SCR_OFFSET 0x24 +#define 
SATA_DWC_REG_OFFSET 0x64 + +/* DWC SATA Registers */ +struct sata_dwc_regs { + u32 fptagr; /* 1st party DMA tag */ + u32 fpbor; /* 1st party DMA buffer offset */ + u32 fptcr; /* 1st party DMA Xfr count */ + u32 dmacr; /* DMA Control */ + u32 dbtsr; /* DMA Burst Transac size */ + u32 intpr; /* Interrupt Pending */ + u32 intmr; /* Interrupt Mask */ + u32 errmr; /* Error Mask */ + u32 llcr; /* Link Layer Control */ + u32 phycr; /* PHY Control */ + u32 physr; /* PHY Status */ + u32 rxbistpd; /* Recvd BIST pattern def register */ + u32 rxbistpd1; /* Recvd BIST data dword1 */ + u32 rxbistpd2; /* Recvd BIST pattern data dword2 */ + u32 txbistpd; /* Trans BIST pattern def register */ + u32 txbistpd1; /* Trans BIST data dword1 */ + u32 txbistpd2; /* Trans BIST data dword2 */ + u32 bistcr; /* BIST Control Register */ + u32 bistfctr; /* BIST FIS Count Register */ + u32 bistsr; /* BIST Status Register */ + u32 bistdecr; /* BIST Dword Error count register */ + u32 res[15]; /* Reserved locations */ + u32 testr; /* Test Register */ + u32 versionr; /* Version Register */ + u32 idr; /* ID Register */ + u32 unimpl[192]; /* Unimplemented */ + u32 dmadr[256]; /* FIFO Locations in DMA Mode */ +}; + +#define SCR_SCONTROL_DET_ENABLE 0x00000001 +#define SCR_SSTATUS_DET_PRESENT 0x00000001 +#define SCR_SERROR_DIAG_X 0x04000000 + +/* DWC SATA Register Operations */ +#define SATA_DWC_TXFIFO_DEPTH 0x01FF +#define SATA_DWC_RXFIFO_DEPTH 0x01FF + +#define SATA_DWC_DMACR_TMOD_TXCHEN 0x00000004 +#define SATA_DWC_DMACR_TXCHEN (0x00000001 | \ + SATA_DWC_DMACR_TMOD_TXCHEN) +#define SATA_DWC_DMACR_RXCHEN (0x00000002 | \ + SATA_DWC_DMACR_TMOD_TXCHEN) +#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) | \ + SATA_DWC_DMACR_TMOD_TXCHEN) +#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) | \ + SATA_DWC_DMACR_TMOD_TXCHEN) +#define SATA_DWC_DMACR_TXRXCH_CLEAR SATA_DWC_DMACR_TMOD_TXCHEN + +#define SATA_DWC_DBTSR_MWR(size) ((size/4) & \ + SATA_DWC_TXFIFO_DEPTH) +#define SATA_DWC_DBTSR_MRD(size) (((size/4) & \ + SATA_DWC_RXFIFO_DEPTH) << 16) + +// SATA DWC Interrupts +#define SATA_DWC_INTPR_DMAT 0x00000001 +#define SATA_DWC_INTPR_NEWFP 0x00000002 +#define SATA_DWC_INTPR_PMABRT 0x00000004 +#define SATA_DWC_INTPR_ERR 0x00000008 +#define SATA_DWC_INTPR_NEWBIST 0x00000010 +#define SATA_DWC_INTPR_IPF 0x80000000 +// Interrupt masks +#define SATA_DWC_INTMR_DMATM 0x00000001 +#define SATA_DWC_INTMR_NEWFPM 0x00000002 +#define SATA_DWC_INTMR_PMABRTM 0x00000004 +#define SATA_DWC_INTMR_ERRM 0x00000008 +#define SATA_DWC_INTMR_NEWBISTM 0x00000010 +#define SATA_DWC_INTMR_PRIMERRM 0x00000020 +#define SATA_DWC_INTPR_CMDGOOD 0x00000080 +#define SATA_DWC_INTPR_CMDABORT 0x00000040 + +#define SATA_DWC_LLCR_SCRAMEN 0x00000001 +#define SATA_DWC_LLCR_DESCRAMEN 0x00000002 +#define SATA_DWC_LLCR_RPDEN 0x00000004 + +// Defines for SError register +#define SATA_DWC_SERR_ERRI 0x00000001 // Recovered data integrity error +#define SATA_DWC_SERR_ERRM 0x00000002 // Recovered communication error +#define SATA_DWC_SERR_ERRT 0x00000100 // Non-recovered transient data integrity error +#define SATA_DWC_SERR_ERRC 0x00000200 // Non-recovered persistent communication or data integrity error +#define SATA_DWC_SERR_ERRP 0x00000400 // Protocol error +#define SATA_DWC_SERR_ERRE 0x00000800 // Internal host adapter error +#define SATA_DWC_SERR_DIAGN 0x00010000 // PHYRdy change +#define SATA_DWC_SERR_DIAGI 0x00020000 // PHY internal error +#define SATA_DWC_SERR_DIAGW 0x00040000 // Phy COMWAKE signal is detected +#define SATA_DWC_SERR_DIAGB 0x00080000 
// 10b to 8b decoder err +#define SATA_DWC_SERR_DIAGT 0x00100000 // Disparity error +#define SATA_DWC_SERR_DIAGC 0x00200000 // CRC error +#define SATA_DWC_SERR_DIAGH 0x00400000 // Handshake error +#define SATA_DWC_SERR_DIAGL 0x00800000 // Link sequence (illegal transition) error +#define SATA_DWC_SERR_DIAGS 0x01000000 // Transport state transition error +#define SATA_DWC_SERR_DIAGF 0x02000000 // Unrecognized FIS type +#define SATA_DWC_SERR_DIAGX 0x04000000 // Exchanged error - Set when PHY COMINIT signal is detected. +#define SATA_DWC_SERR_DIAGA 0x08000000 // Port Selector Presence detected + +/* This is all error bits, zero's are reserved fields. */ +#define SATA_DWC_SERR_ERR_BITS 0x0FFF0F03 + +#define SATA_DWC_SCR0_SPD_GET(v) ((v >> 4) & 0x0000000F) + +struct sata_dwc_device { + struct resource reg; /* Resource for register */ + struct device *dev; /* generic device struct */ + struct ata_probe_ent *pe; /* ptr to probe-ent */ + struct ata_host *host; + u8 *reg_base; + struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ + u8 *scr_base; + int dma_channel; /* DWC SATA DMA channel */ + int irq_dma; + struct timer_list an_timer; +}; + +#define SATA_DWC_QCMD_MAX 32 + +struct sata_dwc_device_port { + struct sata_dwc_device *hsdev; + int cmd_issued[SATA_DWC_QCMD_MAX]; + struct lli *llit[SATA_DWC_QCMD_MAX]; + dma_addr_t llit_dma[SATA_DWC_QCMD_MAX]; + u32 dma_chan[SATA_DWC_QCMD_MAX]; + int dma_pending[SATA_DWC_QCMD_MAX]; + u32 sata_dwc_sactive_issued; /* issued queued ops */ + u32 sata_dwc_sactive_queued; /* queued ops */ + u32 dma_interrupt_count; + +}; + +static struct sata_dwc_device* dwc_dev_list[2]; +static int dma_intr_registered = 0; +/* + * Commonly used DWC SATA driver Macros + */ +#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *) \ + (host)->private_data) +#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *) \ + (ap)->host->private_data) +#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *) \ + (ap)->private_data) +#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *) \ + (qc)->ap->host->private_data) +#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *) \ + (hsdevp)->hsdev) + +enum { + SATA_DWC_CMD_ISSUED_NOT = 0, + SATA_DWC_CMD_ISSUED_PENDING = 1, + SATA_DWC_CMD_ISSUED_EXEC = 2, + SATA_DWC_CMD_ISSUED_NODATA = 3, + + SATA_DWC_DMA_PENDING_NONE = 0, + SATA_DWC_DMA_PENDING_TX = 1, + SATA_DWC_DMA_PENDING_RX = 2, +}; + +/* + * Globals + */ +static struct ahb_dma_regs *sata_dma_regs = 0; + +/* + * Prototypes + */ +static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag); +static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, + u32 check_status); +static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); +static void sata_dwc_port_stop(struct ata_port *ap); +static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); + +static int dma_dwc_init(struct sata_dwc_device *hsdev); +static void dma_dwc_exit(struct sata_dwc_device *hsdev); +static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc, + struct lli *lli, dma_addr_t dma_lli, + void __iomem *addr); +static void dma_dwc_xfer_start(int dma_ch); +static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch); +static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev); +static void sata_dwc_init_port ( struct ata_port *ap ); +u8 sata_dwc_check_status(struct ata_port *ap); + + + + +static const char *dir_2_txt(enum dma_data_direction dir) +{ + switch (dir) { + case DMA_BIDIRECTIONAL: + return "bi"; + case 
DMA_FROM_DEVICE: + return "from"; + case DMA_TO_DEVICE: + return "to"; + case DMA_NONE: + return "none"; + default: + return "err"; + } +} + +static const char *prot_2_txt(enum ata_tf_protocols protocol) +{ + switch (protocol) { + case ATA_PROT_UNKNOWN: + return "unknown"; + case ATA_PROT_NODATA: + return "nodata"; + case ATA_PROT_PIO: + return "pio"; + case ATA_PROT_DMA: + return "dma"; + case ATA_PROT_NCQ: + return "ncq"; + case ATAPI_PROT_PIO: + return "atapi pio"; + case ATAPI_PROT_NODATA: + return "atapi nodata"; + case ATAPI_PROT_DMA: + return "atapi dma"; + default: + return "err"; + } +} + +inline const char *ata_cmd_2_txt(const struct ata_taskfile *tf) +{ + switch (tf->command) { + case ATA_CMD_CHK_POWER: + return "ATA_CMD_CHK_POWER"; + case ATA_CMD_EDD: + return "ATA_CMD_EDD"; + case ATA_CMD_FLUSH: + return "ATA_CMD_FLUSH"; + case ATA_CMD_FLUSH_EXT: + return "ATA_CMD_FLUSH_EXT"; + case ATA_CMD_ID_ATA: + return "ATA_CMD_ID_ATA"; + case ATA_CMD_ID_ATAPI: + return "ATA_CMD_ID_ATAPI"; + case ATA_CMD_FPDMA_READ: + return "ATA_CMD_FPDMA_READ"; + case ATA_CMD_FPDMA_WRITE: + return "ATA_CMD_FPDMA_WRITE"; + case ATA_CMD_READ: + return "ATA_CMD_READ"; + case ATA_CMD_READ_EXT: + return "ATA_CMD_READ_EXT"; + case ATA_CMD_READ_NATIVE_MAX_EXT : + return "ATA_CMD_READ_NATIVE_MAX_EXT"; + case ATA_CMD_VERIFY_EXT : + return "ATA_CMD_VERIFY_EXT"; + case ATA_CMD_WRITE: + return "ATA_CMD_WRITE"; + case ATA_CMD_WRITE_EXT: + return "ATA_CMD_WRITE_EXT"; + case ATA_CMD_PIO_READ: + return "ATA_CMD_PIO_READ"; + case ATA_CMD_PIO_READ_EXT: + return "ATA_CMD_PIO_READ_EXT"; + case ATA_CMD_PIO_WRITE: + return "ATA_CMD_PIO_WRITE"; + case ATA_CMD_PIO_WRITE_EXT: + return "ATA_CMD_PIO_WRITE_EXT"; + case ATA_CMD_SET_FEATURES: + return "ATA_CMD_SET_FEATURES"; + case ATA_CMD_PACKET: + return "ATA_CMD_PACKET"; + case ATA_CMD_PMP_READ: + return "ATA_CMD_PMP_READ"; + case ATA_CMD_PMP_WRITE: + return "ATA_CMD_PMP_WRITE"; + default: + return "ATA_CMD_???"; + } +} + +/* + * Dump content of the taskfile + */ +static void sata_dwc_tf_dump(struct device *dwc_dev, struct ata_taskfile *tf) +{ + dwc_dev_vdbg(dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx" + "device: %x\n", tf->command, prot_2_txt(tf->protocol), + tf->flags, tf->device); + dwc_dev_vdbg(dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam:" + "0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal, + tf->lbam, tf->lbah); + dwc_dev_vdbg(dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x " + "hob_lbam: 0x%x hob_lbah: 0x%x\n", tf->hob_feature, + tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, + tf->hob_lbah); +} + +/* + * Function: get_burst_length_encode + * arguments: datalength: length in bytes of data + * returns value to be programmed in register corresponding to data length + * This value is effectively the log(base 2) of the length + */ +static inline int get_burst_length_encode(int datalength) +{ + int items = datalength >> 2; /* div by 4 to get lword count */ + + if (items >= 64) + return 5; + + if (items >= 32) + return 4; + + if (items >= 16) + return 3; + + if (items >= 8) + return 2; + + if (items >= 4) + return 1; + + return 0; +} + +/* + * Clear Interrupts on a DMA channel + */ +static inline void clear_chan_interrupts(int c) +{ + out_le32(&(sata_dma_regs->interrupt_clear.tfr.low), DMA_CHANNEL(c)); + out_le32(&(sata_dma_regs->interrupt_clear.block.low), DMA_CHANNEL(c)); + out_le32(&(sata_dma_regs->interrupt_clear.srctran.low), DMA_CHANNEL(c)); + out_le32(&(sata_dma_regs->interrupt_clear.dsttran.low), DMA_CHANNEL(c)); + 
out_le32(&(sata_dma_regs->interrupt_clear.error.low), DMA_CHANNEL(c)); +} + +/* + * Function: dma_request_channel + * arguments: None + * returns channel number if available else -1 + * This function assigns the next available DMA channel from the list to the + * requester + */ +static int dma_request_channel(struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + + if (!(in_le32(&(sata_dma_regs->dma_chan_en.low)) & DMA_CHANNEL(hsdev->dma_channel))) { + dwc_port_vdbg(ap, "%s Successfully requested DMA channel %d\n", + __func__, hsdev->dma_channel); + return (hsdev->dma_channel); + } + + return -1; +} + + + +/* + * Function: dma_dwc_interrupt + * arguments: irq, dev_id, pt_regs + * returns channel number if available else -1 + * Interrupt Handler for DW AHB SATA DMA + */ +static int dma_dwc_interrupt(int irq, void *hsdev_instance) +{ + volatile u32 tfr_reg, err_reg; + unsigned long flags; + struct sata_dwc_device *hsdev = hsdev_instance; + struct ata_host *host = (struct ata_host *)hsdev->host; + struct ata_port *ap; + struct sata_dwc_device_port *hsdevp; + u8 tag = 0; + int chan; + unsigned int port = 0; + spin_lock_irqsave(&host->lock, flags); + + ap = host->ports[port]; + hsdevp = HSDEVP_FROM_AP(ap); + tag = ap->link.active_tag; + + dwc_port_vdbg(ap, "%s: DMA interrupt in channel %d\n", __func__, hsdev->dma_channel); + + tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low)); + err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low)); + + dwc_port_vdbg(ap, "eot=0x%08x err=0x%08x pending=%d active port=%d\n", + tfr_reg, err_reg, hsdevp->dma_pending[tag], port); + chan = hsdev->dma_channel; + + if (tfr_reg & DMA_CHANNEL(chan)) { + /* + *Each DMA command produces 2 interrupts. Only + * complete the command after both interrupts have been + * seen. (See sata_dwc_isr()) + */ + hsdevp->dma_interrupt_count++; + sata_dwc_clear_dmacr(hsdevp, tag); + + if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) { + dev_err(ap->dev, "DMA not pending eot=0x%08x " + "err=0x%08x tag=0x%02x pending=%d\n", + tfr_reg, err_reg, tag, + hsdevp->dma_pending[tag]); + } + + // Do remain jobs after DMA transfer complete + if ((hsdevp->dma_interrupt_count % 2) == 0) + sata_dwc_dma_xfer_complete(ap, 1); + + /* Clear the interrupt */ + out_le32(&(sata_dma_regs->interrupt_clear.tfr.low), + DMA_CHANNEL(chan)); + } + + /* Process error interrupt. */ + // We do not expect error happen + if (unlikely(err_reg & DMA_CHANNEL(chan))) { + /* TODO Need error handler ! */ + dev_err(ap->dev, "error interrupt err_reg=0x%08x\n", + err_reg); + + spin_lock_irqsave(ap->lock, flags); + //if (ata_is_dma(qc->tf.protocol)) { + /* disable DMAC */ + dma_dwc_terminate_dma(ap, chan); + //} + spin_unlock_irqrestore(ap->lock, flags); + + /* Clear the interrupt. */ + out_le32(&(sata_dma_regs->interrupt_clear.error.low), + DMA_CHANNEL(chan)); + } + + spin_unlock_irqrestore(&host->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t dma_dwc_handler(int irq, void *hsdev_instance) +{ + volatile u32 tfr_reg, err_reg; + int chan; + + tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low)); + err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low)); + + for (chan = 0; chan < DMA_NUM_CHANS; chan++) { + /* Check for end-of-transfer interrupt. */ + + if (tfr_reg & DMA_CHANNEL(chan)) { + dma_dwc_interrupt(0, dwc_dev_list[chan]); + } + else + + /* Check for error interrupt. 
*/ + if (err_reg & DMA_CHANNEL(chan)) { + dma_dwc_interrupt(0, dwc_dev_list[chan]); + } + } + + return IRQ_HANDLED; +} + +static int dma_register_interrupt (struct sata_dwc_device *hsdev) +{ + int retval = 0; + int irq = hsdev->irq_dma; + /* + * FIXME: 2 SATA controllers share the same DMA engine so + * currently, they also share same DMA interrupt + */ + if (!dma_intr_registered) { + printk("%s register irq (%d)\n", __func__, irq); + retval = request_irq(irq, dma_dwc_handler, IRQF_SHARED, "SATA DMA", hsdev); + //retval = request_irq(irq, dma_dwc_handler, IRQF_DISABLED, "SATA DMA", NULL); + if (retval) { + dev_err(hsdev->dev, "%s: could not get IRQ %d\n", __func__, irq); + return -ENODEV; + } + //dma_intr_registered = 1; + } + return retval; +} + +/* + * Function: dma_request_interrupts + * arguments: hsdev + * returns status + * This function registers ISR for a particular DMA channel interrupt + */ +static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq) +{ + int retval = 0; + int dma_chan = hsdev->dma_channel; + + /* Unmask error interrupt */ + out_le32(&sata_dma_regs->interrupt_mask.error.low, + in_le32(&sata_dma_regs->interrupt_mask.error.low) | DMA_ENABLE_CHAN(dma_chan)); + + /* Unmask end-of-transfer interrupt */ + out_le32(&sata_dma_regs->interrupt_mask.tfr.low, + in_le32(&sata_dma_regs->interrupt_mask.tfr.low) | DMA_ENABLE_CHAN(dma_chan)); + + dwc_dev_vdbg(hsdev->dev, "Current value of interrupt_mask.error=0x%0x\n", in_le32(&sata_dma_regs->interrupt_mask.error.low)); + dwc_dev_vdbg(hsdev->dev, "Current value of interrupt_mask.tfr=0x%0x\n", in_le32(&sata_dma_regs->interrupt_mask.tfr.low)); +#if 0 + out_le32(&sata_dma_regs->interrupt_mask.block.low, + DMA_ENABLE_CHAN(dma_chan)); + + out_le32(&sata_dma_regs->interrupt_mask.srctran.low, + DMA_ENABLE_CHAN(dma_chan)); + + out_le32(&sata_dma_regs->interrupt_mask.dsttran.low, + DMA_ENABLE_CHAN(dma_chan)); +#endif + return retval; +} + +/* + * Function: map_sg_to_lli + * arguments: sg: scatter/gather list(sg) + * num_elems: no of elements in sg list + * dma_lli: LLI table + * dest: destination address + * read: whether the transfer is read or write + * returns array of AHB DMA Linked List Items + * This function creates a list of LLIs for DMA Xfr and returns the number + * of elements in the DMA linked list. + * + * Note that the Synopsis driver has a comment proposing that better performance + * is possible by only enabling interrupts on the last item in the linked list. + * However, it seems that could be a problem if an error happened on one of the + * first items. The transfer would halt, but no error interrupt would occur. + * + * Currently this function sets interrupts enabled for each linked list item: + * DMA_CTL_INT_EN. 
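+ *
+ * (Editor's note, not part of the original patch: as a worked example of
+ * the splitting done below, a single 20 KiB scatterlist element becomes
+ * three LLI entries, assuming it does not also straddle a 64 KiB address
+ * window.  SATA_DWC_DMAC_CTRL_TSIZE_MAX is 0x800 * 4 = 8192 bytes, so the
+ * element is cut into 8192 + 8192 + 4096, and each piece is clipped
+ * further, if necessary, so that no block crosses an 8 KiB FIS boundary.
+ * With AHB_DMA_BRST_DFLT = 64 bytes, get_burst_length_encode() returns 3,
+ * i.e. the SRC/DST_MSIZE fields request 16-word bursts.)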
+ */ +static int map_sg_to_lli(struct ata_queued_cmd *qc, struct lli *lli, + dma_addr_t dma_lli, void __iomem *dmadr_addr) +{ + struct scatterlist *sg = qc->sg; + struct device *dwc_dev = qc->ap->dev; + int num_elems = qc->n_elem; + int dir = qc->dma_dir; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(qc->ap); + + int i, idx = 0; + int fis_len = 0; + dma_addr_t next_llp; + int bl; + unsigned int dma_ts = 0; + + dwc_port_vdbg(qc->ap, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x " + "dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli, + (u32)dmadr_addr); + + bl = get_burst_length_encode(AHB_DMA_BRST_DFLT); + + for (i = 0; i < num_elems; i++, sg++) { + u32 addr, offset; + u32 sg_len, len; + + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + dwc_port_vdbg(qc->ap, "%s: elem=%d sg_addr=0x%x sg_len=%d\n", + __func__, i, addr, sg_len); + + while (sg_len) { + + if (unlikely(idx >= SATA_DWC_DMAC_LLI_NUM)) { + /* The LLI table is not large enough. */ + dev_err(dwc_dev, "LLI table overrun (idx=%d)\n", + idx); + break; + } + len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ? + SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len; + + offset = addr & 0xffff; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + /* + * Make sure a LLI block is not created that will span a + * 8K max FIS boundary. If the block spans such a FIS + * boundary, there is a chance that a DMA burst will + * cross that boundary -- this results in an error in + * the host controller. + */ + if (unlikely(fis_len + len > 8192)) { + dwc_port_vdbg(qc->ap, "SPLITTING: fis_len=%d(0x%x) " + "len=%d(0x%x)\n", fis_len, fis_len, + len, len); + len = 8192 - fis_len; + fis_len = 0; + } else { + fis_len += len; + } + if (fis_len == 8192) + fis_len = 0; + + /* + * Set DMA addresses and lower half of control register + * based on direction. 
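+			 * On APM821xx the source/destination AHB master
+			 * (the SMS/DMS fields) differs between DMA channel 0
+			 * and channel 1, so the two channels get separate
+			 * CTL encodings below.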
+ */ + dwc_port_vdbg(qc->ap, "%s: sg_len = %d, len = %d\n", __func__, sg_len, len); + +#if defined(CONFIG_APM821xx) + if (dir == DMA_FROM_DEVICE) { + lli[idx].dar = cpu_to_le32(addr); + lli[idx].sar = cpu_to_le32((u32)dmadr_addr); + if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */ + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | + DMA_CTL_SMS(1) | /* Source: Master 2 */ + DMA_CTL_DMS(0) | /* Dest: Master 1 */ + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_SINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */ + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | + DMA_CTL_SMS(2) | /* Source: Master 3 */ + DMA_CTL_DMS(0) | /* Dest: Master 1 */ + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_SINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } + } else { /* DMA_TO_DEVICE */ + lli[idx].sar = cpu_to_le32(addr); + lli[idx].dar = cpu_to_le32((u32)dmadr_addr); + if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */ + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | + DMA_CTL_SMS(0) | + DMA_CTL_DMS(1) | + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_DINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */ + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | + DMA_CTL_SMS(0) | + DMA_CTL_DMS(2) | + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_DINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } + } +#else + if (dir == DMA_FROM_DEVICE) { + lli[idx].dar = cpu_to_le32(addr); + lli[idx].sar = cpu_to_le32((u32)dmadr_addr); + + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) | + DMA_CTL_SMS(0) | + DMA_CTL_DMS(1) | + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_SINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } else { /* DMA_TO_DEVICE */ + lli[idx].sar = cpu_to_le32(addr); + lli[idx].dar = cpu_to_le32((u32)dmadr_addr); + + lli[idx].ctl.low = cpu_to_le32( + DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) | + DMA_CTL_SMS(1) | + DMA_CTL_DMS(0) | + DMA_CTL_SRC_MSIZE(bl) | + DMA_CTL_DST_MSIZE(bl) | + DMA_CTL_DINC_NOCHANGE | + DMA_CTL_SRC_TRWID(2) | + DMA_CTL_DST_TRWID(2) | + DMA_CTL_INT_EN | + DMA_CTL_LLP_SRCEN | + DMA_CTL_LLP_DSTEN); + } +#endif + dwc_port_vdbg(qc->ap, "%s setting ctl.high len: 0x%08x val: " + "0x%08x\n", __func__, len, + DMA_CTL_BLK_TS(len / 4)); + + /* Program the LLI CTL high register */ + dma_ts = DMA_CTL_BLK_TS(len / 4); + lli[idx].ctl.high = cpu_to_le32(dma_ts); + + /* + *Program the next pointer. The next pointer must be + * the physical address, not the virtual address. + */ + next_llp = (dma_lli + ((idx + 1) * sizeof(struct lli))); + + /* The last 2 bits encode the list master select. 
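+			 * DMA_LLP_LMS() folds the AHB master number into
+			 * those two bits; the remaining bits hold the
+			 * physical address of the next struct lli.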
*/ +#if defined(CONFIG_APM821xx) + next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER1); +#else + next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2); +#endif + + lli[idx].llp = cpu_to_le32(next_llp); + + dwc_port_vdbg(qc->ap, "%s: index %d\n", __func__, idx); + dwc_port_vdbg(qc->ap, "%s setting ctl.high with val: 0x%08x\n", __func__, lli[idx].ctl.high); + dwc_port_vdbg(qc->ap, "%s setting ctl.low with val: 0x%08x\n", __func__, lli[idx].ctl.low); + dwc_port_vdbg(qc->ap, "%s setting lli.dar with val: 0x%08x\n", __func__, lli[idx].dar); + dwc_port_vdbg(qc->ap, "%s setting lli.sar with val: 0x%08x\n", __func__, lli[idx].sar); + dwc_port_vdbg(qc->ap, "%s setting next_llp with val: 0x%08x\n", __func__, lli[idx].llp); + + idx++; + sg_len -= len; + addr += len; + } + } + + /* + * The last next ptr has to be zero and the last control low register + * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source + * and destination enable) set back to 0 (disabled.) This is what tells + * the core that this is the last item in the linked list. + */ + if (likely(idx)) { + lli[idx-1].llp = 0x00000000; + lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32; + + /* Flush cache to memory */ + dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx), + DMA_BIDIRECTIONAL); + } + + dwc_port_vdbg(qc->ap, "%s: Final index %d\n", __func__, idx-1); + dwc_port_vdbg(qc->ap, "%s setting ctl.high with val: 0x%08x\n", __func__, lli[idx-1].ctl.high); + dwc_port_vdbg(qc->ap, "%s setting ctl.low with val: 0x%08x\n", __func__, lli[idx-1].ctl.low); + dwc_port_vdbg(qc->ap, "%s setting lli.dar with val: 0x%08x\n", __func__, lli[idx-1].dar); + dwc_port_vdbg(qc->ap, "%s setting lli.sar with val: 0x%08x\n", __func__, lli[idx-1].sar); + dwc_port_vdbg(qc->ap, "%s setting next_llp with val: 0x%08x\n", __func__, lli[idx-1].llp); + + return idx; +} + +/* + * Function: dma_dwc_xfer_start + * arguments: Channel number + * Return : None + * Enables the DMA channel + */ +static void dma_dwc_xfer_start(int dma_ch) +{ + /* Enable the DMA channel */ + out_le32(&(sata_dma_regs->dma_chan_en.low), + in_le32(&(sata_dma_regs->dma_chan_en.low)) | + DMA_ENABLE_CHAN(dma_ch)); + +#if defined(CONFIG_SATA_DWC_VDEBUG) + printk("DMA CFG = 0x%08x\n", in_le32(&(sata_dma_regs->dma_cfg.low))); + printk("%s: setting sata_dma_regs->dma_chan_en.low with val: 0x%08x\n", + __func__, in_le32(&(sata_dma_regs->dma_chan_en.low))); +#endif + + +#if defined(CONFIG_APOLLO3G) + signal_hdd_led(1 /*blink=yes*/, 2 /* _3G_LED_GREEN */); +#endif + +} + +/* + * Check if the selected DMA channel is currently enabled. 
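+ * Returns 1 if the channel's bit is set in the low word of the
+ * channel-enable register, 0 otherwise.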
+ */ +static int dma_dwc_channel_enabled(int ch) +{ + u32 dma_chan; + + // Read the DMA channel register + dma_chan = in_le32(&(sata_dma_regs->dma_chan_en.low)); + if (dma_chan & DMA_CHANNEL(ch)) + return 1; + + return 0; +} + +/* + * Terminate the current DMA transaction + */ +static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch) +{ + int enabled = dma_dwc_channel_enabled(dma_ch); + + dev_info(ap->dev, "%s terminate DMA on channel=%d enabled=%d\n", + __func__, dma_ch, enabled); + + if (enabled) { + // Disable the selected channel + out_le32(&(sata_dma_regs->dma_chan_en.low), + in_le32(&(sata_dma_regs->dma_chan_en.low)) | DMA_DISABLE_CHAN(dma_ch)); + + // Wait for the channel is disabled + do { + enabled = dma_dwc_channel_enabled(dma_ch); + msleep(10); + } while (enabled); + } +} + + +/* + * Setup data and DMA configuration ready for DMA transfer + */ +static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc, + struct lli *lli, dma_addr_t dma_lli, + void __iomem *addr) +{ + int dma_ch; + int num_lli; + + /* Acquire DMA channel */ + dma_ch = dma_request_channel(qc->ap); + if (unlikely(dma_ch == -1)) { + dev_err(qc->ap->dev, "%s: dma channel unavailable\n", __func__); + return -EAGAIN; + } + dwc_port_vdbg(qc->ap, "%s: Got channel %d\n", __func__, dma_ch); + + /* Convert SG list to linked list of items (LLIs) for AHB DMA */ + num_lli = map_sg_to_lli(qc, lli, dma_lli, addr); + + dwc_port_vdbg(qc->ap, "%s sg: 0x%p, count: %d lli: %p dma_lli: 0x%0xlx addr:" + " %p lli count: %d\n", __func__, qc->sg, qc->n_elem, lli, + (u32)dma_lli, addr, num_lli); + + /* Clear channel interrupts */ + clear_chan_interrupts(dma_ch); + + /* Program the CFG register. */ +#if defined(CONFIG_APM821xx) + if (dma_ch == 0) { + /* Buffer mode enabled, FIFO_MODE=0 */ + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x0000009); + /* Channel 0 bit[7:5] */ + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020); + } else if (dma_ch == 1) { + /* Buffer mode enabled, FIFO_MODE=0 */ + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x0000088d); + /* Channel 1 bit[7:5] */ + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020); + } +#else + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), + DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ); + out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0); +#endif + + /* Program the address of the linked list */ +#if defined(CONFIG_APM821xx) + out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low), + DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER1)); +#else + out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low), + DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2)); +#endif + + /* Program the CTL register with src enable / dst enable */ + //out_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low), + // DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN); + out_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low), 0x18000000); + + dwc_port_vdbg(qc->ap, "%s DMA channel %d is ready\n", __func__, dma_ch); + dwc_port_vdbg(qc->ap, "%s setting cfg.high of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high))); + dwc_port_vdbg(qc->ap, "%s setting cfg.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low))); + dwc_port_vdbg(qc->ap, "%s setting llp.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low))); + dwc_port_vdbg(qc->ap, "%s setting ctl.low of channel %d with val: 0x%08x\n", __func__, dma_ch, 
in_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low))); + + return dma_ch; +} + +/* + * Function: dma_dwc_exit + * arguments: None + * returns status + * This function exits the SATA DMA driver + */ +static void dma_dwc_exit(struct sata_dwc_device *hsdev) +{ + dwc_dev_vdbg(hsdev->dev, "%s:\n", __func__); + if (sata_dma_regs) + iounmap(sata_dma_regs); + + if (hsdev->irq_dma) + free_irq(hsdev->irq_dma, hsdev); +} + +/* + * Function: dma_dwc_init + * arguments: hsdev + * returns status + * This function initializes the SATA DMA driver + */ +static int dma_dwc_init(struct sata_dwc_device *hsdev) +{ + int err; + int irq = hsdev->irq_dma; + + err = dma_request_interrupts(hsdev, irq); + if (err) { + dev_err(hsdev->dev, "%s: dma_request_interrupts returns %d\n", + __func__, err); + goto error_out; + } + + /* Enabe DMA */ + out_le32(&(sata_dma_regs->dma_cfg.low), DMA_EN); + + dev_notice(hsdev->dev, "DMA initialized\n"); + dev_notice(hsdev->dev, "DMA CFG = 0x%08x\n", in_le32(&(sata_dma_regs->dma_cfg.low))); + dwc_dev_vdbg(hsdev->dev, "SATA DMA registers=0x%p\n", sata_dma_regs); + + return 0; + +error_out: + dma_dwc_exit(hsdev); + + return err; +} + + +static void sata_dwc_dev_config(struct ata_device *adev) +{ + /* + * Does not support NCQ over a port multiplier + * (no FIS-based switching). + */ + if (adev->flags & ATA_DFLAG_NCQ) { + /* + * TODO: debug why enabling NCQ makes the linux crashed + * in hot plug after the first hot unplug action. + * --> need to investigate more + */ + adev->flags &= ~ATA_DFLAG_NCQ; + if (sata_pmp_attached(adev->link->ap)) { + adev->flags &= ~ATA_DFLAG_NCQ; + ata_dev_printk(adev, KERN_INFO, + "NCQ disabled for command-based switching\n"); + } + } + + /* + * Since the sata_pmp_error_handler function in libata-pmp + * make FLAG_AN disabled in the first time SATA port is configured. + * Asynchronous notification is not configured. + * This will enable the AN feature manually. 
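+	 * (ATA_DFLAG_AN is therefore set unconditionally below so that
+	 * asynchronous notification keeps working behind a port multiplier.)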
+ */ + adev->flags |= ATA_DFLAG_AN; +} + + +static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) +{ + if (unlikely(scr > SCR_NOTIFICATION)) { + dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", + __func__, scr); + return -EINVAL; + } + + *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4)); + dwc_dev_vdbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", + __func__, link->ap->print_id, scr, *val); + + return 0; +} + +static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val) +{ + dwc_dev_vdbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", + __func__, link->ap->print_id, scr, val); + if (unlikely(scr > SCR_NOTIFICATION)) { + dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", + __func__, scr); + return -EINVAL; + } + out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val); + + return 0; +} + +static inline u32 sata_dwc_core_scr_read ( struct ata_port *ap, unsigned int scr) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + return in_le32((void __iomem *)hsdev->scr_base + (scr * 4)); +} + + +static inline void sata_dwc_core_scr_write ( struct ata_port *ap, unsigned int scr, u32 val) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + out_le32((void __iomem *)hsdev->scr_base + (scr * 4), val); +} + +static inline void clear_serror(struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + out_le32( (void __iomem *)hsdev->scr_base + 4, + in_le32((void __iomem *)hsdev->scr_base + 4)); +} + +static inline void clear_intpr(struct sata_dwc_device *hsdev) +{ + out_le32(&hsdev->sata_dwc_regs->intpr, + in_le32(&hsdev->sata_dwc_regs->intpr)); +} + +static inline void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit) +{ + out_le32(&hsdev->sata_dwc_regs->intpr, bit); + // in_le32(&hsdev->sata_dwc_regs->intpr)); +} + + +static inline void enable_err_irq(struct sata_dwc_device *hsdev) +{ + out_le32(&hsdev->sata_dwc_regs->intmr, + in_le32(&hsdev->sata_dwc_regs->intmr) | SATA_DWC_INTMR_ERRM); + out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERR_ERR_BITS); +} + +static inline u32 qcmd_tag_to_mask(u8 tag) +{ + return 0x00000001 << (tag & 0x1f); +} + + +/* + * Timer to monitor SCR_NOTIFICATION registers on the + * SATA port + */ +static void sata_dwc_an_chk(unsigned long arg) +{ + struct ata_port *ap = (void *)arg; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + unsigned long flags; + int rc = 0x0; + u32 sntf = 0x0; + + spin_lock_irqsave(ap->lock, flags); + rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); + + // If some changes on the SCR4, call asynchronous notification + if ( (rc == 0) & (sntf != 0)) { + dwc_port_dbg(ap, "Call assynchronous notification sntf=0x%08x\n", sntf); + sata_async_notification(ap); + hsdev->an_timer.expires = jiffies + msecs_to_jiffies(8000); + } else { + hsdev->an_timer.expires = jiffies + msecs_to_jiffies(3000); + } + add_timer(&hsdev->an_timer); + spin_unlock_irqrestore(ap->lock, flags); +} + + +/* + * sata_dwc_pmp_select - Set the PMP field in SControl to the specified port number. + * + * @port: The value (port number) to set the PMP field to. + * + * @return: The old value of the PMP field. 
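+ *
+ * If no port multiplier is attached, SControl is left unchanged and the
+ * requested port number is returned as-is.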
+ */ +static u32 sata_dwc_pmp_select(struct ata_port *ap, u32 port) +{ + u32 scontrol, old_port; + if (sata_pmp_supported(ap)) { + scontrol = sata_dwc_core_scr_read(ap, SCR_CONTROL); + old_port = SCONTROL_TO_PMP(scontrol); + + // Select new PMP port + if ( port != old_port ) { + scontrol &= ~SCONTROL_PMP_MASK; + sata_dwc_core_scr_write(ap, SCR_CONTROL, scontrol | PMP_TO_SCONTROL(port)); + dwc_port_dbg(ap, "%s: old port=%d new port=%d\n", __func__, old_port, port); + } + return old_port; + } + else + return port; +} + +/* + * Get the current PMP port + */ +static inline u32 current_pmp(struct ata_port *ap) +{ + return SCONTROL_TO_PMP(sata_dwc_core_scr_read(ap, SCR_CONTROL)); +} + + +/* + * Process when a PMP card is attached in the SATA port. + * Since our SATA port support command base switching only, + * NCQ will not be available. + * We disable the NCQ feature in SATA port. + */ +static void sata_dwc_pmp_attach ( struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + + dev_info(ap->dev, "Attach SATA port multiplier with %d ports\n", ap->nr_pmp_links); + // Disable NCQ + ap->flags &= ~ATA_FLAG_NCQ; + + // Initialize timer for checking AN + init_timer(&hsdev->an_timer); + hsdev->an_timer.expires = jiffies + msecs_to_jiffies(20000); + hsdev->an_timer.function = sata_dwc_an_chk; + hsdev->an_timer.data = (unsigned long)(ap); + add_timer(&hsdev->an_timer); +} + +/* + * Process when PMP card is removed from the SATA port. + * Re-enable NCQ for using by the SATA drive in the future + */ +static void sata_dwc_pmp_detach ( struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + + dev_info(ap->dev, "Detach SATA port\n"); + // Re-enable NCQ + // TODO: remove the below comment out when NCQ problem fixed + //ap->flags |= ATA_FLAG_NCQ; + + sata_dwc_pmp_select(ap, 0); + + // Delete timer since PMP card is detached + del_timer(&hsdev->an_timer); +} + + + +// Check the link to be ready +int sata_dwc_check_ready ( struct ata_link *link ) { + u8 status; + struct ata_port *ap = link->ap; + status = ioread8(ap->ioaddr.status_addr); + return ata_check_ready(status); +} + + +/* + * Do soft reset on the current SATA link. 
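+ * Sequence: select the PMP link, pulse ATA_SRST through the device
+ * control register, wait for readiness via sata_dwc_check_ready(),
+ * then classify the attached device from its signature taskfile and
+ * clear SError.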
+ */ +static int sata_dwc_softreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + int rc; + struct ata_port *ap = link->ap; + struct ata_ioports *ioaddr = &ap->ioaddr; + struct ata_taskfile tf; + + sata_dwc_pmp_select(link->ap, sata_srst_pmp(link)); + + /* Issue bus reset */ + iowrite8(ap->ctl, ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + iowrite8(ap->ctl, ioaddr->ctl_addr); + ap->last_ctl = ap->ctl; + + /* Always check readiness of the master device */ + rc = ata_wait_after_reset(link, deadline, sata_dwc_check_ready); + + // Classify the ata_port + *classes = ATA_DEV_NONE; + /* Verify if SStatus indicates device presence */ + if (ata_link_online(link)) { + memset(&tf, 0, sizeof(tf)); + ata_sff_tf_read(ap, &tf); + *classes = ata_dev_classify(&tf); + } + + if ( *classes == ATA_DEV_PMP) + dwc_link_dbg(link, "-->found PMP device by sig\n"); + + clear_serror(link->ap); + + return rc; +} + + + + +/* + * sata_dwc_hardreset - Do hardreset the SATA controller + */ +static int sata_dwc_hardreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + int rc; + const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); + bool online; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap); + + dwc_link_dbg(link, "%s\n", __func__); + sata_dwc_pmp_select(link->ap, sata_srst_pmp(link)); + dwc_port_vdbg(link->ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr))); + + // Call standard hard reset + rc = sata_link_hardreset(link, timing, deadline, &online, NULL); + + // Reconfigure the port after hard reset + if ( ata_link_online(link) ) + sata_dwc_init_port(link->ap); + + return online ? -EAGAIN : rc; +} + +/* + * Do hard reset on each PMP link + */ +static int sata_dwc_pmp_hardreset(struct ata_link *link, unsigned int *classes, + unsigned long deadline) +{ + int rc = 0; + sata_dwc_pmp_select(link->ap, sata_srst_pmp(link)); + rc = sata_std_hardreset(link, classes, deadline); + return rc; +} + +/* See ahci.c */ +/* + * Process error when the SATAn_INTPR's ERR bit is set + * The processing is based on SCR_ERROR register content + */ +static void sata_dwc_error_intr(struct ata_port *ap, + struct sata_dwc_device *hsdev, uint intpr) +{ + struct ata_eh_info *ehi; + struct ata_link *link; + struct ata_queued_cmd *active_qc = NULL; + u32 serror; + bool freeze = false, abort = false; + int pmp, ret; + unsigned int err_mask = 0, action = 0; +#if defined(CONFIG_SATA_DWC_VDEBUG) + int dma_chan = hsdev->dma_channel; +#endif + + link = &ap->link; + ehi = &link->eh_info; + + /* Record irq stat */ + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "irq_stat 0x%08x", intpr); + + // Record SERROR + serror = sata_dwc_core_scr_read(ap, SCR_ERROR); + dwc_port_dbg(ap, "%s serror = 0x%08x\n", __func__, serror); + + // Clear SERROR and interrupt bit + clear_serror(ap); + clear_intpr(hsdev); + + // Print out for test only + if ( serror ) { + dwc_port_info(ap, "Detect errors:"); + if ( serror & SATA_DWC_SERR_ERRI ) + printk(" ERRI"); + if ( serror & SATA_DWC_SERR_ERRM ) + printk(" ERRM"); + if ( serror & SATA_DWC_SERR_ERRT ) + printk(" ERRT"); + if ( serror & SATA_DWC_SERR_ERRC ) + printk(" ERRC"); + if ( serror & SATA_DWC_SERR_ERRP ) + printk(" ERRP"); + if ( serror & SATA_DWC_SERR_ERRE ) + printk(" ERRE"); + if ( serror & SATA_DWC_SERR_DIAGN ) + printk(" DIAGN"); + if ( serror & SATA_DWC_SERR_DIAGI ) + printk(" DIAGI"); + if ( serror & SATA_DWC_SERR_DIAGW ) + 
printk(" DIAGW"); + if ( serror & SATA_DWC_SERR_DIAGB ) + printk(" DIAGB"); + if ( serror & SATA_DWC_SERR_DIAGT ) + printk(" DIAGT"); + if ( serror & SATA_DWC_SERR_DIAGC ) + printk(" DIAGC"); + if ( serror & SATA_DWC_SERR_DIAGH ) + printk(" DIAGH"); + if ( serror & SATA_DWC_SERR_DIAGL ) + printk(" DIAGL"); + if ( serror & SATA_DWC_SERR_DIAGS ) + printk(" DIAGS"); + if ( serror & SATA_DWC_SERR_DIAGF ) + printk(" DIAGF"); + if ( serror & SATA_DWC_SERR_DIAGX ) + printk(" DIAGX"); + if ( serror & SATA_DWC_SERR_DIAGA ) + printk(" DIAGA"); + printk("\n"); + } + +#if defined(CONFIG_SATA_DWC_VDEBUG) + printk("%s reading cfg.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].cfg.high))); + printk("%s reading cfg.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].cfg.low))); + printk("%s reading llp.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].llp.low))); + printk("%s reading ctl.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].ctl.low))); + printk("%s reading sar.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].sar.low))); + printk("%s reading sar.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].sar.high))); + printk("%s reading dar.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].dar.low))); + printk("%s reading dar.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].dar.high))); +#endif + + // Process hotplug for SATA port + if ( serror & (SATA_DWC_SERR_DIAGX | SATA_DWC_SERR_DIAGW)) { + dwc_port_info(ap, "Detect hot plug signal\n"); + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, serror & SATA_DWC_SERR_DIAGN ? "PHY RDY changed" : "device exchanged"); + freeze = true; + } + + // Process PHY internal error / Link sequence (illegal transition) error + if ( serror & (SATA_DWC_SERR_DIAGI | SATA_DWC_SERR_DIAGL)) { + ehi->err_mask |= AC_ERR_HSM; + ehi->action |= ATA_EH_RESET; + freeze = true; + } + + // Process Internal host adapter error + if ( serror & SATA_DWC_SERR_ERRE ) { + dev_err(ap->dev, "Detect Internal host adapter error\n"); + // --> need to review + ehi->err_mask |= AC_ERR_HOST_BUS; + ehi->action |= ATA_EH_RESET; + freeze = true; + } + + // Process Protocol Error + if ( serror & SATA_DWC_SERR_ERRP ) { + dev_err(ap->dev, "Detect Protocol error\n"); + ehi->err_mask |= AC_ERR_HSM; + ehi->action |= ATA_EH_RESET; + freeze = true; + } + + // Process non-recovered persistent communication error + if ( serror & SATA_DWC_SERR_ERRC ) { + dev_err(ap->dev, "Detect non-recovered persistent communication error\n"); + // --> TODO: review processing error + ehi->err_mask |= AC_ERR_ATA_BUS; + ehi->action |= ATA_EH_SOFTRESET; + //ehi->flags |= ATA_EHI_NO_AUTOPSY; + //freeze = true; + } + + // Non-recovered transient data integrity error + if ( serror & SATA_DWC_SERR_ERRT ) { + dev_err(ap->dev, "Detect non-recovered transient data integrity error\n"); + ehi->err_mask |= AC_ERR_ATA_BUS; + //ehi->err_mask |= AC_ERR_DEV; + ehi->action |= ATA_EH_SOFTRESET; + //ehi->flags |= ATA_EHI_NO_AUTOPSY; + } + + // Since below errors have been recovered by hardware + // they don't need any error processing. 
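+	// (Both conditions are only logged with dev_warn(); no EH action is scheduled.)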
+ if ( serror & SATA_DWC_SERR_ERRM ) { + dev_warn(ap->dev, "Detect recovered communication error"); + } + if ( serror & SATA_DWC_SERR_ERRI ) { + dev_warn(ap->dev, "Detect recovered data integrity error"); + } + + // If any error occur, process the qc + if (serror & (SATA_DWC_SERR_ERRT | SATA_DWC_SERR_ERRC)) { + //if (serror & 0x03f60f0) { + abort = true; + /* find out the offending link and qc */ + if (sata_pmp_attached(ap)) { + pmp = current_pmp(ap); + // If we are working on the PMP port + if ( pmp < ap->nr_pmp_links ) { + link = &ap->pmp_link[pmp]; + ehi = &link->eh_info; + active_qc = ata_qc_from_tag(ap, link->active_tag); + err_mask |= AC_ERR_DEV; + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat); + } else { + err_mask |= AC_ERR_HSM; + action |= ATA_EH_RESET; + freeze = true; + } + + } + // Work on SATA port + else { + freeze = true; + active_qc = ata_qc_from_tag(ap, link->active_tag); + } + + if ( active_qc) { + active_qc->err_mask |= err_mask; + } else { + ehi->err_mask = err_mask; + } + } + + if ( freeze | abort ) { + //sata_dwc_qc_complete(ap, active_qc, 1); + // Terminate DMA channel if it is currenly in use + if ( dma_request_channel(ap) != -1 ) { + dwc_port_dbg(ap, "Terminate DMA channel %d for handling error\n", hsdev->dma_channel); + dma_dwc_terminate_dma(ap, hsdev->dma_channel); + } + } + + if (freeze) { + ret = ata_port_freeze(ap); + ata_port_printk(ap, KERN_INFO, "Freeze port with %d QCs aborted\n", ret); + } + else if (abort) { + if (active_qc) { + ret = ata_link_abort(active_qc->dev->link); + ata_link_printk(link, KERN_INFO, "Abort %d QCs\n", ret); + } else { + ret = ata_port_abort(ap); + ata_port_printk(ap, KERN_INFO, "Abort %d QCs on the SATA port\n", ret); + } + } +} + + +/* + * Function : sata_dwc_isr + * arguments : irq, void *dev_instance, struct pt_regs *regs + * Return value : irqreturn_t - status of IRQ + * This Interrupt handler called via port ops registered function. 
+ * .irq_handler = sata_dwc_isr + */ +static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) +{ + struct ata_host *host = (struct ata_host *)dev_instance; + struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host); + struct ata_port *ap; + struct ata_queued_cmd *qc; + unsigned long flags; + u8 status, tag; + int handled, num_processed, port = 0; + u32 intpr, sactive, sactive2, tag_mask; + struct sata_dwc_device_port *hsdevp; + + spin_lock_irqsave(&host->lock, flags); + + /* Read the interrupt register */ + intpr = in_le32(&hsdev->sata_dwc_regs->intpr); + + ap = host->ports[port]; + hsdevp = HSDEVP_FROM_AP(ap); + + dwc_port_dbg(ap,"%s\n",__func__); + if ( intpr != 0x80000080) + dwc_port_dbg(ap, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr, ap->link.active_tag); + //dwc_port_dbg(ap, "%s: INTMR=0x%08x, ERRMR=0x%08x\n", __func__, in_le32(&hsdev->sata_dwc_regs->intmr), in_le32(&hsdev->sata_dwc_regs->errmr)); + + /* Check for error interrupt */ + if (intpr & SATA_DWC_INTPR_ERR) { + sata_dwc_error_intr(ap, hsdev, intpr); + handled = 1; +#if defined(CONFIG_APOLLO3G) + signal_hdd_led(0 /*off blink*/, 1 /*red color*/); +#endif + goto done_irqrestore; + } + + /* Check for DMA SETUP FIS (FP DMA) interrupt */ + if (intpr & SATA_DWC_INTPR_NEWFP) { + dwc_port_dbg(ap, "%s: NEWFP INTERRUPT in HSDEV with DMA channel %d\n", __func__, hsdev->dma_channel); + clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP); + + tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr)); + dwc_dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag); + if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PENDING) + dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag); + + hsdevp->sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag); + + qc = ata_qc_from_tag(ap, tag); + /* + * Start FP DMA for NCQ command. At this point the tag is the + * active tag. It is the tag that matches the command about to + * be completed. + */ + qc->ap->link.active_tag = tag; + sata_dwc_bmdma_start_by_tag(qc, tag); + qc->ap->hsm_task_state = HSM_ST_LAST; + + handled = 1; + goto done_irqrestore; + } + + sactive = sata_dwc_core_scr_read(ap, SCR_ACTIVE); + tag_mask = (hsdevp->sata_dwc_sactive_issued | sactive) ^ sactive; + + /* If no sactive issued and tag_mask is zero then this is not NCQ */ + if (hsdevp->sata_dwc_sactive_issued == 0 && tag_mask == 0) { + if (ap->link.active_tag == ATA_TAG_POISON) + tag = 0; + else + tag = ap->link.active_tag; + qc = ata_qc_from_tag(ap, tag); + + /* DEV interrupt w/ no active qc? */ + if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { + dev_err(ap->dev, "%s intr with no active qc qc=%p\n", + __func__, qc); + ap->ops->sff_check_status(ap); + handled = 1; + goto done_irqrestore; + } + + status = ap->ops->sff_check_status(ap); + + qc->ap->link.active_tag = tag; + hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; + + if (status & ATA_ERR) { + dwc_dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status); + sata_dwc_qc_complete(ap, qc, 1); + handled = 1; + goto done_irqrestore; + } + + dwc_dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n", + __func__, prot_2_txt(qc->tf.protocol)); +drv_still_busy: + if (ata_is_dma(qc->tf.protocol)) { + int dma_flag = hsdevp->dma_pending[tag]; + /* + * Each DMA transaction produces 2 interrupts. The DMAC + * transfer complete interrupt and the SATA controller + * operation done interrupt. The command should be + * completed only after both interrupts are seen. 
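+			 * dma_interrupt_count is incremented for each of the
+			 * two; the transfer is completed only when the count
+			 * becomes even.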
+ */ + hsdevp->dma_interrupt_count++; + if (unlikely(dma_flag == SATA_DWC_DMA_PENDING_NONE)) { + dev_err(ap->dev, "%s: DMA not pending " + "intpr=0x%08x status=0x%08x pend=%d\n", + __func__, intpr, status, dma_flag); + } + + if ((hsdevp->dma_interrupt_count % 2) == 0) + sata_dwc_dma_xfer_complete(ap, 1); + } else if (ata_is_pio(qc->tf.protocol)) { + ata_sff_hsm_move(ap, qc, status, 0); + handled = 1; + goto done_irqrestore; + } else { + if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) + goto drv_still_busy; + } + + handled = 1; + goto done_irqrestore; + } + + /* + * This is a NCQ command. At this point we need to figure out for which + * tags we have gotten a completion interrupt. One interrupt may serve + * as completion for more than one operation when commands are queued + * (NCQ). We need to process each completed command. + */ + +process_cmd: /* process completed commands */ + sactive = sata_dwc_core_scr_read(ap, SCR_ACTIVE); + tag_mask = (hsdevp->sata_dwc_sactive_issued | sactive) ^ sactive; + + if (sactive != 0 || hsdevp->sata_dwc_sactive_issued > 1 || tag_mask > 1) { + dwc_dev_dbg(ap->dev, "%s NCQ: sactive=0x%08x sactive_issued=0x%08x" + " tag_mask=0x%08x\n", __func__, sactive, + hsdevp->sata_dwc_sactive_issued, tag_mask); + } + + if (unlikely((tag_mask | hsdevp->sata_dwc_sactive_issued) != hsdevp->sata_dwc_sactive_issued)) { + dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x " + "sata_dwc_sactive_issued=0x%08x tag_mask=0x%08x\n", + sactive, hsdevp->sata_dwc_sactive_issued, tag_mask); + } + + /* read just to clear ... not bad if currently still busy */ + status = ap->ops->sff_check_status(ap); + dwc_dev_dbg(ap->dev, "%s ATA status register=0x%x, tag_mask=0x%x\n", __func__, status, tag_mask); + + tag = 0; + num_processed = 0; + while (tag_mask) { + num_processed++; + while (!(tag_mask & 0x00000001)) { + tag++; + tag_mask <<= 1; + } + tag_mask &= (~0x00000001); + qc = ata_qc_from_tag(ap, tag); + + /* To be picked up by completion functions */ + qc->ap->link.active_tag = tag; + hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; + + /* Let libata/scsi layers handle error */ + if (unlikely(status & ATA_ERR)) { + dwc_dev_vdbg(ap->dev, "%s ATA_ERR (0x%x)\n", + __func__, status); + + sata_dwc_qc_complete(ap, qc, 1); + handled = 1; + goto done_irqrestore; + } + + /* Process completed command */ + dwc_dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, + prot_2_txt(qc->tf.protocol)); + if (ata_is_dma(qc->tf.protocol)) { + hsdevp->dma_interrupt_count++; + if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) + dev_warn(ap->dev, + "%s: DMA not pending?\n", __func__); + if ((hsdevp->dma_interrupt_count % 2) == 0) + sata_dwc_dma_xfer_complete(ap, 1); + } else { + if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) + goto still_busy; + } + continue; + +still_busy: + ap->stats.idle_irq++; + dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n", + ap->print_id); + } /* while tag_mask */ + + /* + * Check to see if any commands completed while we were processing our + * initial set of completed commands (reading of status clears + * interrupts, so we might miss a completed command interrupt if one + * came in while we were processing: + * we read status as part of processing a completed command). 
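+	 * SCR_ACTIVE is re-read below; if it changed, control jumps back to
+	 * process_cmd to handle the newly completed tags.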
+ */ + sactive2 = sata_dwc_core_scr_read(ap, SCR_ACTIVE); + if (sactive2 != sactive) { + dwc_dev_dbg(ap->dev, "More finished - sactive=0x%x sactive2=0x%x\n", + sactive, sactive2); + goto process_cmd; + } + handled = 1; + +done_irqrestore: + spin_unlock_irqrestore(&host->lock, flags); +#if defined(CONFIG_APOLLO3G) + signal_hdd_led(0 /*off blink*/, -1 /* no color */); +#endif + return IRQ_RETVAL(handled); +} + + +/* + * Clear DMA Control Register after completing transferring data + * using AHB DMA. + */ +static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp); + + if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) { + // Clear receive channel enable bit + out_le32(&(hsdev->sata_dwc_regs->dmacr), + SATA_DWC_DMACR_RX_CLEAR( + in_le32(&(hsdev->sata_dwc_regs->dmacr)))); + } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) { + // Clear transmit channel enable bit + out_le32(&(hsdev->sata_dwc_regs->dmacr), + SATA_DWC_DMACR_TX_CLEAR( + in_le32(&(hsdev->sata_dwc_regs->dmacr)))); + } else { + /* + * This should not happen, it indicates the driver is out of + * sync. If it does happen, clear dmacr anyway. + */ + dev_err(hsdev->dev, "%s DMA protocol RX and TX DMA not pending " + "tag=0x%02x pending=%d dmacr: 0x%08x\n", + __func__, tag, hsdevp->dma_pending[tag], + in_le32(&(hsdev->sata_dwc_regs->dmacr))); + + // Clear all transmit and receive bit, but TXMOD bit is set to 1 + out_le32(&(hsdev->sata_dwc_regs->dmacr), + SATA_DWC_DMACR_TXRXCH_CLEAR); + } +} + +/* + * + */ +static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) +{ + struct ata_queued_cmd *qc; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + u8 tag = 0; + + tag = ap->link.active_tag; + qc = ata_qc_from_tag(ap, tag); + +#ifdef DEBUG_NCQ + if (tag > 0) { + dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s " + "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command, + dir_2_txt(qc->dma_dir), prot_2_txt(qc->tf.protocol), + in_le32(&(hsdev->sata_dwc_regs->dmacr))); + } +#endif + + if (ata_is_dma(qc->tf.protocol)) { + // DMA out of sync error + if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) { + dev_err(ap->dev, "%s DMA protocol RX and TX DMA not " + "pending dmacr: 0x%08x\n", __func__, + in_le32(&(hsdev->sata_dwc_regs->dmacr))); + } + + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; + sata_dwc_qc_complete(ap, qc, check_status); + ap->link.active_tag = ATA_TAG_POISON; + } else { + sata_dwc_qc_complete(ap, qc, check_status); + } +} + +/* + * + */ +static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, + u32 check_status) +{ + u8 status = 0; + int i = 0; + u32 mask = 0x0; + u8 tag = qc->tag; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + u32 serror; + int dma_ch; + + dwc_dev_vdbg(ap->dev, "%s checkstatus? 
%x\n", __func__, check_status); + + if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)) + dev_err(ap->dev, "TX DMA PENDINGING\n"); + else if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)) + dev_err(ap->dev, "RX DMA PENDINGING\n"); + + if (check_status) { + i = 0; + do { + /* check main status, clearing INTRQ */ + status = ap->ops->sff_check_status(ap); + if (status & ATA_BUSY) { + dwc_dev_vdbg(ap->dev, "STATUS BUSY (0x%02x) [%d]\n", + status, i); + } + if (++i > 10) + break; + } while (status & ATA_BUSY); + + status = ap->ops->sff_check_status(ap); + if (unlikely(status & ATA_BUSY)) + dev_err(ap->dev, "QC complete cmd=0x%02x STATUS BUSY " + "(0x%02x) [%d]\n", qc->tf.command, status, i); + + + // Check error ==> need to process error here + serror = sata_dwc_core_scr_read(ap, SCR_ERROR); + if (unlikely(serror & SATA_DWC_SERR_ERR_BITS)) + { + dev_err(ap->dev, "****** SERROR=0x%08x ******\n", serror); + ap->link.eh_context.i.action |= ATA_EH_RESET; + if (ata_is_dma(qc->tf.protocol)) { + dma_ch = hsdevp->dma_chan[tag]; + dma_dwc_terminate_dma(ap, dma_ch); + } else { + dma_ch = hsdevp->dma_chan[0]; + dma_dwc_terminate_dma(ap, dma_ch); + } + } + } + dwc_dev_vdbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u: " + "protocol=%d\n", qc->tf.command, status, ap->print_id, + qc->tf.protocol); + + /* clear active bit */ + mask = (~(qcmd_tag_to_mask(tag))); + hsdevp->sata_dwc_sactive_queued = hsdevp->sata_dwc_sactive_queued & mask; + hsdevp->sata_dwc_sactive_issued = hsdevp->sata_dwc_sactive_issued & mask; + dwc_port_vdbg(ap, "%s - sata_dwc_sactive_queued=0x%08x, sata_dwc_sactive_issued=0x%08x\n",__func__, hsdevp->sata_dwc_sactive_queued, hsdevp->sata_dwc_sactive_issued); + dwc_port_vdbg(ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr))); + + /* Complete taskfile transaction (does not read SCR registers) */ + ata_qc_complete(qc); + + return 0; +} + +/* + * Clear interrupt and error flags in DMA status register. + */ +void sata_dwc_irq_clear (struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + dwc_port_dbg(ap,"%s\n",__func__); + + // Clear DMA interrupts + clear_chan_interrupts(hsdev->dma_channel); + //sata_dma_regs + //out_le32(&hsdev->sata_dwc_regs->intmr, + // in_le32(&hsdev->sata_dwc_regs->intmr) & ~SATA_DWC_INTMR_ERRM); + //out_le32(&hsdev->sata_dwc_regs->errmr, 0x0); + //sata_dwc_check_status(ap); +} + +/* + * Turn on IRQ + */ +void sata_dwc_irq_on(struct ata_port *ap) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + u8 tmp; + + dwc_port_dbg(ap,"%s\n",__func__); + ap->ctl &= ~ATA_NIEN; + ap->last_ctl = ap->ctl; + + if (ioaddr->ctl_addr) + iowrite8(ap->ctl, ioaddr->ctl_addr); + tmp = ata_wait_idle(ap); + + ap->ops->sff_irq_clear(ap); + enable_err_irq(hsdev); +} + + +/* + * This function enables the interrupts in IMR and unmasks them in ERRMR + * + */ +static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev) +{ + // Enable interrupts + out_le32(&hsdev->sata_dwc_regs->intmr, + SATA_DWC_INTMR_ERRM | + SATA_DWC_INTMR_NEWFPM | + SATA_DWC_INTMR_PMABRTM | + SATA_DWC_INTMR_DMATM); + + /* + * Unmask the error bits that should trigger an error interrupt by + * setting the error mask register. 
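+	 * Writing SATA_DWC_SERR_ERR_BITS to ERRMR below selects which SError
+	 * conditions raise the ERR interrupt serviced by sata_dwc_error_intr().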
+ */ + out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERR_ERR_BITS); + + dwc_dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", __func__, + in_le32(&hsdev->sata_dwc_regs->intmr), + in_le32(&hsdev->sata_dwc_regs->errmr)); +} + +/* + * Configure DMA and interrupts on SATA port. This should be called after + * hardreset is executed on the SATA port. + */ +static void sata_dwc_init_port ( struct ata_port *ap ) { + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + + // Configure DMA + if (ap->port_no == 0) { + dwc_dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", + __func__); + + // Clear all transmit/receive bits + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXRXCH_CLEAR); + + dwc_dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__); + out_le32(&hsdev->sata_dwc_regs->dbtsr, + (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); + } + + // Enable interrupts + sata_dwc_enable_interrupts(hsdev); +} + + +/* + * Setup SATA ioport with corresponding register addresses + */ +static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) +{ + port->cmd_addr = (void *)base + 0x00; + port->data_addr = (void *)base + 0x00; + + port->error_addr = (void *)base + 0x04; + port->feature_addr = (void *)base + 0x04; + + port->nsect_addr = (void *)base + 0x08; + + port->lbal_addr = (void *)base + 0x0c; + port->lbam_addr = (void *)base + 0x10; + port->lbah_addr = (void *)base + 0x14; + + port->device_addr = (void *)base + 0x18; + port->command_addr = (void *)base + 0x1c; + port->status_addr = (void *)base + 0x1c; + + port->altstatus_addr = (void *)base + 0x20; + port->ctl_addr = (void *)base + 0x20; +} + + +/* + * Function : sata_dwc_port_start + * arguments : struct ata_ioports *port + * Return value : returns 0 if success, error code otherwise + * This function allocates the scatter gather LLI table for AHB DMA + */ +static int sata_dwc_port_start(struct ata_port *ap) +{ + int err = 0; + struct sata_dwc_device *hsdev; + struct sata_dwc_device_port *hsdevp = NULL; + struct device *pdev; + u32 sstatus; + int i; + + hsdev = HSDEV_FROM_AP(ap); + + dwc_dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no); + + hsdev->host = ap->host; + pdev = ap->host->dev; + if (!pdev) { + dev_err(ap->dev, "%s: no ap->host->dev\n", __func__); + err = -ENODEV; + goto cleanup_exit; + } + + /* Allocate Port Struct */ + hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL); + if (!hsdevp) { + dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__); + err = -ENOMEM; + goto cleanup_exit; + } + hsdevp->hsdev = hsdev; + + for (i = 0; i < SATA_DWC_QCMD_MAX; i++) + hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; + + ap->bmdma_prd = 0; /* set these so libata doesn't use them */ + ap->bmdma_prd_dma = 0; + + /* + * DMA - Assign scatter gather LLI table. We can't use the libata + * version since it's PRD is IDE PCI specific. 
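+	 * One coherent LLI table of SATA_DWC_DMAC_LLI_TBL_SZ bytes is
+	 * allocated per queued-command tag (SATA_DWC_QCMD_MAX tables in all).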
+ */ + for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { + hsdevp->llit[i] = dma_alloc_coherent(pdev, + SATA_DWC_DMAC_LLI_TBL_SZ, + &(hsdevp->llit_dma[i]), + GFP_ATOMIC); + if (!hsdevp->llit[i]) { + dev_err(ap->dev, "%s: dma_alloc_coherent failed size " + "0x%x\n", __func__, SATA_DWC_DMAC_LLI_TBL_SZ); + err = -ENOMEM; + goto cleanup_exit; + } + } + + if (ap->port_no == 0) { + dwc_dev_vdbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", + __func__); + + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXRXCH_CLEAR); + + dwc_dev_vdbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__); + out_le32(&hsdev->sata_dwc_regs->dbtsr, + (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); + ata_port_printk(ap, KERN_INFO, "%s: setting burst size in DBTSR: 0x%08x\n", + __func__, in_le32(&hsdev->sata_dwc_regs->dbtsr)); + } + + /* Clear any error bits before libata starts issuing commands */ + clear_serror(ap); + + ap->private_data = hsdevp; + + /* Are we in Gen I or II */ + sstatus = sata_dwc_core_scr_read(ap, SCR_STATUS); + switch (SATA_DWC_SCR0_SPD_GET(sstatus)) { + case 0x0: + dev_info(ap->dev, "**** No neg speed (nothing attached?) \n"); + break; + case 0x1: + dev_info(ap->dev, "**** GEN I speed rate negotiated \n"); + break; + case 0x2: + dev_info(ap->dev, "**** GEN II speed rate negotiated \n"); + break; + } + +cleanup_exit: + if (err) { + kfree(hsdevp); + sata_dwc_port_stop(ap); + dwc_dev_vdbg(ap->dev, "%s: fail\n", __func__); + } else { + dwc_dev_vdbg(ap->dev, "%s: done\n", __func__); + } + + return err; +} + + +static void sata_dwc_port_stop(struct ata_port *ap) +{ + int i; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + + dwc_port_dbg(ap, "%s: stop port\n", __func__); + + if (hsdevp && hsdev) { + /* deallocate LLI table */ + for (i = 0; i < SATA_DWC_QCMD_MAX; i++) { + dma_free_coherent(ap->host->dev, + SATA_DWC_DMAC_LLI_TBL_SZ, + hsdevp->llit[i], hsdevp->llit_dma[i]); + } + + kfree(hsdevp); + } + ap->private_data = NULL; +} + +/* + * Since the SATA DWC is master only. The dev select operation will + * be removed. + */ +void sata_dwc_dev_select(struct ata_port *ap, unsigned int device) +{ + // Do nothing + ndelay(100); +} + +/* + * Function : sata_dwc_exec_command_by_tag + * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued + * Return value : None + * This function keeps track of individual command tag ids and calls + * ata_exec_command in libata + */ +static void sata_dwc_exec_command_by_tag(struct ata_port *ap, + struct ata_taskfile *tf, + u8 tag, u32 cmd_issued) +{ + unsigned long flags; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + + dwc_dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d, ap->link->tag=0x%08x\n", __func__, tf->command, + ata_cmd_2_txt(tf), tag, ap->link.active_tag); + + spin_lock_irqsave(&ap->host->lock, flags); + hsdevp->cmd_issued[tag] = cmd_issued; + spin_unlock_irqrestore(&ap->host->lock, flags); + + /* + * Clear SError before executing a new command. + * + * TODO if we read a PM's registers now, we will throw away the task + * file values loaded into the shadow registers for this command. + * + * sata_dwc_scr_write and read can not be used here. Clearing the PM + * managed SError register for the disk needs to be done before the + * task file is loaded. 
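+	 * clear_serror() below therefore goes through the controller's own
+	 * SCR block directly.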
+ */ + clear_serror(ap); + ata_sff_exec_command(ap, tf); +} + +static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag) +{ + sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag, + SATA_DWC_CMD_ISSUED_PENDING); +} + +static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc) +{ + u8 tag = qc->tag; + + dwc_port_dbg(qc->ap, "%s\n", __func__); + if (ata_is_ncq(qc->tf.protocol)) { + dwc_dev_vdbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", + __func__, qc->ap->link.sactive, tag); + } else { + tag = 0; + } + + sata_dwc_bmdma_setup_by_tag(qc, tag); +} + +static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) +{ + volatile int start_dma; + u32 reg, dma_chan; + struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc); + struct ata_port *ap = qc->ap; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + int dir = qc->dma_dir; + dma_chan = hsdevp->dma_chan[tag]; + + /* Used for ata_bmdma_start(qc) -- we are not BMDMA compatible */ + + if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) { + start_dma = 1; + if (dir == DMA_TO_DEVICE) + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX; + else + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX; + } else { + dev_err(ap->dev, "%s: Command not pending cmd_issued=%d " + "(tag=%d) - DMA NOT started\n", __func__, + hsdevp->cmd_issued[tag], tag); + start_dma = 0; + } + + dwc_dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s " + "start_dma? %x\n", __func__, qc, tag, qc->tf.command, + dir_2_txt(qc->dma_dir), start_dma); + sata_dwc_tf_dump(hsdev->dev, &(qc->tf)); + + // Start DMA transfer + if (start_dma) { + reg = sata_dwc_core_scr_read(ap, SCR_ERROR); + if (unlikely(reg & SATA_DWC_SERR_ERR_BITS)) { + dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n", + __func__, reg); + //sata_async_notification(ap); + //return; + } + + // Set DMA control registers + if (dir == DMA_TO_DEVICE) + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXCHEN); + else + out_le32(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_RXCHEN); + + dwc_dev_vdbg(ap->dev, "%s: setting DMACR: 0x%08x\n", __func__, in_le32(&hsdev->sata_dwc_regs->dmacr)); + /* Enable AHB DMA transfer on the specified channel */ + dma_dwc_xfer_start(dma_chan); + } +} + + +static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) +{ + u8 tag = qc->tag; + + if (ata_is_ncq(qc->tf.protocol)) { + dwc_dev_vdbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", + __func__, qc->ap->link.sactive, tag); + } else { + tag = 0; + } + + dwc_port_dbg(qc->ap, "%s, tag=0x%08x\n", __func__, tag); + sata_dwc_bmdma_start_by_tag(qc, tag); +} + +/* + * Function : sata_dwc_qc_prep_by_tag + * arguments : ata_queued_cmd *qc, u8 tag + * Return value : None + * qc_prep for a particular queued command based on tag + */ +static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) +{ + struct ata_port *ap = qc->ap; + int dma_chan; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + int dir; + + // DMA direction + dir = qc->dma_dir; + + if ((dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO)) + return; + + dwc_dev_vdbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", + __func__, ap->port_no, dir_2_txt(dir), qc->n_elem); + + // Setup DMA for transfer + dma_chan = dma_dwc_xfer_setup(qc, hsdevp->llit[tag], + hsdevp->llit_dma[tag], + (void *__iomem)(&hsdev->sata_dwc_regs->dmadr)); + + if (unlikely(dma_chan < 0)) { + dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", + __func__, dma_chan); + return; + 
} + + hsdevp->dma_chan[tag] = dma_chan; +} + + + +/** + * ata_sff_exec_command - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA command, with proper synchronization with interrupt + * handler / other threads. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void sata_dwc_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) +{ + iowrite8(tf->command, ap->ioaddr.command_addr); + /* If we have an mmio device with no ctl and no altstatus + * method this will fail. No such devices are known to exist. + */ + if (ap->ioaddr.altstatus_addr) + ioread8(ap->ioaddr.altstatus_addr); + ndelay(400); +} + +/** + * sata_dwc_tf_to_host - issue ATA taskfile to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA taskfile register set to ATA host controller, + * with proper synchronization with interrupt handler and + * other threads. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +static inline void sata_dwc_tf_to_host(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + dwc_port_dbg(ap,"%s\n",__func__); + ap->ops->sff_tf_load(ap, tf); + sata_dwc_exec_command(ap, tf); +} + + +/* + * Process command queue issue + */ +static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + int ret = 0; + struct ata_eh_info *ehi; + u32 scontrol, sstatus; + scontrol = sata_dwc_core_scr_read(ap, SCR_CONTROL); + + ehi = &ap->link.eh_info; + /* + * Fix the problem when PMP card is unplugged from the SATA port. + * QC is still issued but no device present. Ignore the current QC. + * and pass error to error handler + */ + sstatus = sata_dwc_core_scr_read(ap, SCR_STATUS); + if ( sstatus == 0x0) { + ata_port_printk(ap, KERN_INFO, "Detect connection lost while commands are executing --> ignore current command\n"); + ata_ehi_hotplugged(ehi); + ap->link.eh_context.i.action |= ATA_EH_RESET; + return ret; + } + + // Set PMP field in the SCONTROL register + if ( sata_pmp_attached(ap) ) + sata_dwc_pmp_select(ap, qc->dev->link->pmp); + +#ifdef DEBUG_NCQ + if (qc->tag > 0 || ap->link.sactive > 1) { + dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s" + " ap active_tag=0x%08x ap sactive=0x%08x\n", + __func__, ap->print_id, qc->tf.command, + ata_cmd_2_txt(&qc->tf), qc->tag, + prot_2_txt(qc->tf.protocol), ap->link.active_tag, + ap->link.sactive); + } +#endif + + // Process NCQ + if (ata_is_ncq(qc->tf.protocol)) { + dwc_link_dbg(qc->dev->link, "%s --> process NCQ , ap->link.active_tag=0x%08x, active_tag=0%08x\n", __func__, ap->link.active_tag, qc->tag); + ap->link.active_tag = qc->tag; + ap->ops->sff_tf_load(ap, &qc->tf); + sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag, + SATA_DWC_CMD_ISSUED_PENDING); + } else { + dwc_link_dbg(qc->dev->link, "%s --> non NCQ process, ap->link.active_tag=%d, active_tag=0%08x\n", __func__, ap->link.active_tag, qc->tag); + // Sync ata_port with qc->tag + ap->link.active_tag = qc->tag; + ret = ata_bmdma_qc_issue(qc); + } + + return ret; +} + +#if 0 +/* + * Function : sata_dwc_eng_timeout + * arguments : ata_port *ap + * Return value : None + * error handler for DMA time out + * ata_eng_timeout(ap) -- this does bmdma stuff which can not be done by this + * driver. 
SEE ALSO ata_qc_timeout(ap) + */ +static void sata_dwc_eng_timeout(struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + struct ata_queued_cmd *qc; + u8 tag; + uint mask = 0x0; + unsigned long flags; + u32 serror, intpr, dma_ch; + + tag = ap->link.active_tag; + dma_ch = hsdevp->dma_chan[tag]; + qc = ata_qc_from_tag(ap, tag); + + dev_err(ap->dev, "%s: id=%d active_tag=%d qc=%p dma_chan=%d\n", + __func__, ap->print_id, tag, qc, dma_ch); + + intpr = in_le32(&hsdev->sata_dwc_regs->intpr); + serror = sata_dwc_core_scr_read(ap, SCR_ERROR); + + dev_err(ap->dev, "intpr=0x%08x serror=0x%08x\n", intpr, serror); + + /* If there are no error bits set, can we just pass this on to eh? */ + if (!(serror & SATA_DWC_SERR_ERR_BITS) && + !(intpr & SATA_DWC_INTPR_ERR)) { + + spin_lock_irqsave(&ap->host->lock, flags); + if (dma_dwc_channel_enabled(dma_ch)) + dma_dwc_terminate_dma(ap, dma_ch); + + hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; + + /* clear active bit */ + mask = (~(qcmd_tag_to_mask(tag))); + hsdevp->sata_dwc_sactive_queued = hsdevp->sata_dwc_sactive_queued & mask; + hsdevp->sata_dwc_sactive_issued = hsdevp->sata_dwc_sactive_issued & mask; + + spin_unlock_irqrestore(&ap->host->lock, flags); + } else { + /* This is wrong, what really needs to be done is a reset. */ + + spin_lock_irqsave(ap->lock, flags); + + if (ata_is_dma(qc->tf.protocol)) { + /* disable DMAC */ + dma_dwc_terminate_dma(ap, dma_ch); + } + + spin_unlock_irqrestore(ap->lock, flags); + } + WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); + if (qc->flags & ATA_QCFLAG_ACTIVE) { + qc->err_mask |= AC_ERR_TIMEOUT; + /* + * test-only: The original code (AMCC: 2.6.19) called + * ata_eng_timeout(ap) here. This function is not available + * anymore. So what to do now? 
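+	 * (This handler is compiled out with #if 0; command timeouts fall
+	 * through to the standard libata EH path via .error_handler instead.)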
+ */ + } +} +#endif +/* + * Function : sata_dwc_qc_prep + * arguments : ata_queued_cmd *qc + * Return value : None + * qc_prep for a particular queued command + */ +static void sata_dwc_qc_prep(struct ata_queued_cmd *qc) +{ + u32 sactive; + u8 tag = qc->tag; + + if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO)) + return; + +#ifdef DEBUG_NCQ + if (qc->tag > 0) { + dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n", + __func__, qc->tag, qc->ap->link.active_tag); + } +#endif + + if (qc->tf.protocol == ATA_PROT_NCQ) { + sactive = sata_dwc_core_scr_read(qc->ap, SCR_ACTIVE); + sactive |= (0x00000001 << tag); + sata_dwc_core_scr_write(qc->ap, SCR_ACTIVE, sactive); + dwc_dev_vdbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x " + "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive, + sactive); + } else { + tag = 0; + } + + sata_dwc_qc_prep_by_tag(qc, tag); +} + + + +static void sata_dwc_post_internal_cmd(struct ata_queued_cmd *qc) +{ + if (qc->flags & ATA_QCFLAG_FAILED) + ata_eh_freeze_port(qc->ap); +} + +static void sata_dwc_error_handler(struct ata_port *ap) +{ + u32 serror; + u32 intmr, errmr; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + + serror = sata_dwc_core_scr_read(ap, SCR_ERROR); + intmr = in_le32(&hsdev->sata_dwc_regs->intmr); + errmr = in_le32(&hsdev->sata_dwc_regs->errmr); + + //sata_dwc_dma_xfer_complete(ap,1); + dwc_port_dbg(ap, "%s: SERROR=0x%08x, INTMR=0x%08x, ERRMR=0x%08x\n", __func__, serror, intmr, errmr); + + dwc_port_vdbg(ap, "%s - sata_dwc_sactive_queued=0x%08x, sata_dwc_sactive_issued=0x%08x\n",__func__, hsdevp->sata_dwc_sactive_queued, hsdevp->sata_dwc_sactive_issued); + dwc_port_vdbg(ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr))); + dwc_port_vdbg(ap, "qc_active=0x%08x, qc_allocated=0x%08x, active_tag=%d\n", ap->qc_active, ap->qc_allocated, ap->link.active_tag); + + sata_pmp_error_handler(ap); +} + +/* + * sata_dwc_check_status - Get value of the Status Register + * @ap: Port to check + * + * Output content of the status register (CDR7) + */ +u8 sata_dwc_check_status(struct ata_port *ap) +{ + return ioread8(ap->ioaddr.status_addr); +} + + +/* + * Freeze the port by clear interrupt + * @ap: Port to freeze + */ +void sata_dwc_freeze(struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + dwc_port_dbg(ap, "call %s ...\n",__func__); + // turn IRQ off + clear_intpr(hsdev); + clear_serror(ap); + out_le32(&hsdev->sata_dwc_regs->intmr, 0x0); +} + +/* + * Thaw the port by turning IRQ on + */ +void sata_dwc_thaw(struct ata_port *ap) +{ + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); + + dwc_port_dbg(ap, "call %s ...\n",__func__); + // Clear IRQ + clear_intpr(hsdev); + // Turn IRQ back on + sata_dwc_enable_interrupts(hsdev); +} + + +/* + * scsi mid-layer and libata interface structures + */ +static struct scsi_host_template sata_dwc_sht = { + ATA_NCQ_SHT(DRV_NAME), + /* + * test-only: Currently this driver doesn't handle NCQ + * correctly. We enable NCQ but set the queue depth to a + * max of 1. This will get fixed in in a future release. 
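	 * (Hence .can_queue below is left at ATA_DEF_QUEUE rather than
	 * ATA_MAX_QUEUE.)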
+ */ +// .sg_tablesize = LIBATA_MAX_PRD, + .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */ + .dma_boundary = ATA_DMA_BOUNDARY, +}; + + +static struct ata_port_operations sata_dwc_ops = { + .inherits = &sata_pmp_port_ops, + .dev_config = sata_dwc_dev_config, + + .error_handler = sata_dwc_error_handler, + .softreset = sata_dwc_softreset, + .hardreset = sata_dwc_hardreset, + .pmp_softreset = sata_dwc_softreset, + .pmp_hardreset = sata_dwc_pmp_hardreset, + + .qc_defer = sata_pmp_qc_defer_cmd_switch, + .qc_prep = sata_dwc_qc_prep, + .qc_issue = sata_dwc_qc_issue, + .qc_fill_rtf = ata_sff_qc_fill_rtf, + + .scr_read = sata_dwc_scr_read, + .scr_write = sata_dwc_scr_write, + + .port_start = sata_dwc_port_start, + .port_stop = sata_dwc_port_stop, + + .bmdma_setup = sata_dwc_bmdma_setup, + .bmdma_start = sata_dwc_bmdma_start, + // Reuse some SFF functions + .sff_check_status = sata_dwc_check_status, + .sff_tf_read = ata_sff_tf_read, + .sff_data_xfer = ata_sff_data_xfer, + .sff_tf_load = ata_sff_tf_load, + .sff_dev_select = sata_dwc_dev_select, + .sff_exec_command = sata_dwc_exec_command, + + .sff_irq_on = sata_dwc_irq_on, +/* .sff_irq_clear = sata_dwc_irq_clear, + .freeze = sata_dwc_freeze, + .thaw = sata_dwc_thaw, + .sff_irq_on = ata_sff_irq_on, + */ + .sff_irq_clear = ata_bmdma_irq_clear, + .freeze = ata_sff_freeze, + .thaw = ata_sff_thaw, + .pmp_attach = sata_dwc_pmp_attach, + .pmp_detach = sata_dwc_pmp_detach, + .post_internal_cmd = sata_dwc_post_internal_cmd, + + /* test-only: really needed? */ + //.eng_timeout = sata_dwc_eng_timeout, +}; + +static const struct ata_port_info sata_dwc_port_info[] = { + { + /* + * test-only: Currently this driver doesn't handle NCQ + * correctly. So we disable NCQ here for now. To enable + * it ATA_FLAG_NCQ needs to be added to the flags below. 
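 * Note (added): as committed, the .flags initializer below does include
 * ATA_FLAG_NCQ, so the wording above looks stale; NCQ is advertised here
 * and the effective queue depth is limited through the
 * scsi_host_template instead.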
+ */ + .flags = ATA_FLAG_SATA | + ATA_FLAG_NCQ | + ATA_FLAG_PMP | ATA_FLAG_AN, + .pio_mask = ATA_PIO4, /* pio 0-4 */ + .udma_mask = ATA_UDMA6, + .port_ops = &sata_dwc_ops, + }, +}; + +static int sata_dwc_probe(struct platform_device *ofdev) +{ + struct sata_dwc_device *hsdev; + u32 idr, versionr; + char *ver = (char *)&versionr; + u8 *base = NULL; + int err = 0; + int irq; + struct ata_host *host; + struct ata_port_info pi = sata_dwc_port_info[0]; + const struct ata_port_info *ppi[] = { &pi, NULL }; + struct device_node *np = ofdev->dev.of_node; + const unsigned int *dma_channel; + /* + * Check if device is enabled + */ + if (!of_device_is_available(np)) { + printk(KERN_INFO "%s: Port disabled via device-tree\n", + np->full_name); + return 0; + } + + /* Allocate DWC SATA device */ + hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL); + if (hsdev == NULL) { + dev_err(&ofdev->dev, "kmalloc failed for hsdev\n"); + err = -ENOMEM; + goto error; + } + + + // Identify SATA DMA channel used for the current SATA device + dma_channel = of_get_property(np, "dma-channel", NULL); + if ( dma_channel ) { + dev_notice(&ofdev->dev, "Gettting DMA channel %d\n", *dma_channel); + hsdev->dma_channel = *dma_channel; + } else + hsdev->dma_channel = 0; + + /* Ioremap SATA registers */ + base = of_iomap(np, 0); + if (!base) { + dev_err(&ofdev->dev, "ioremap failed for SATA register address\n"); + err = -ENODEV; + goto error_kmalloc; + } + hsdev->reg_base = base; + dwc_dev_vdbg(&ofdev->dev, "ioremap done for SATA register address\n"); + + /* Synopsys DWC SATA specific Registers */ + hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET); + + /* Allocate and fill host */ + host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); + if (!host) { + dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n"); + err = -ENOMEM; + goto error_iomap; + } + + host->private_data = hsdev; + + /* Setup port */ + host->ports[0]->ioaddr.cmd_addr = base; + host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; + hsdev->scr_base = (u8 *)(base + SATA_DWC_SCR_OFFSET); + sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base); + + /* Read the ID and Version Registers */ + idr = in_le32(&hsdev->sata_dwc_regs->idr); + versionr = in_le32(&hsdev->sata_dwc_regs->versionr); + dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n", + idr, ver[0], ver[1], ver[2]); + + /* Get SATA DMA interrupt number */ + irq = irq_of_parse_and_map(np, 1); + if (irq == NO_IRQ) { + dev_err(&ofdev->dev, "no SATA DMA irq\n"); + err = -ENODEV; + goto error_out; + } + + /* Get physical SATA DMA register base address */ + if (!sata_dma_regs) { + sata_dma_regs = of_iomap(np, 1); + if (!sata_dma_regs) { + dev_err(&ofdev->dev, "ioremap failed for AHBDMA register address\n"); + err = -ENODEV; + goto error_out; + } + } + /* Save dev for later use in dev_xxx() routines */ + hsdev->dev = &ofdev->dev; + + /* Init glovbal dev list */ + dwc_dev_list[hsdev->dma_channel] = hsdev; + + /* Initialize AHB DMAC */ + hsdev->irq_dma = irq; + dma_dwc_init(hsdev); + dma_register_interrupt(hsdev); + + + /* Enable SATA Interrupts */ + sata_dwc_enable_interrupts(hsdev); + + /* Get SATA interrupt number */ + irq = irq_of_parse_and_map(np, 0); + if (irq == NO_IRQ) { + dev_err(&ofdev->dev, "no SATA irq\n"); + err = -ENODEV; + goto error_out; + } + + /* + * Now, register with libATA core, this will also initiate the + * device discovery process, invoking our port_start() handler & + * error_handler() to execute a dummy Softreset EH session + */ + 
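	/*
	 * Note (added): ata_host_activate() returns an int that the call
	 * below ignores. A slightly more defensive probe would propagate
	 * it, for example (sketch only, not part of the original patch):
	 *
	 *	err = ata_host_activate(host, irq, sata_dwc_isr, 0,
	 *				&sata_dwc_sht);
	 *	if (err)
	 *		goto error_out;
	 */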
ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); + + dev_set_drvdata(&ofdev->dev, host); + + /* Everything is fine */ + return 0; + +error_out: + /* Free SATA DMA resources */ + dma_dwc_exit(hsdev); + +error_iomap: + iounmap(base); +error_kmalloc: + kfree(hsdev); +error: + return err; +} + +static int sata_dwc_remove(struct platform_device *ofdev) +{ + struct device *dev = &ofdev->dev; + struct ata_host *host = dev_get_drvdata(dev); + struct sata_dwc_device *hsdev = host->private_data; + + ata_host_detach(host); + + dev_set_drvdata(dev, NULL); + + /* Free SATA DMA resources */ + dma_dwc_exit(hsdev); + + iounmap(hsdev->reg_base); + kfree(hsdev); + kfree(host); + + dwc_dev_vdbg(&ofdev->dev, "done\n"); + + return 0; +} + +static const struct of_device_id sata_dwc_match[] = { + { .compatible = "amcc,sata-460ex", }, + { .compatible = "amcc,sata-apm82181", }, + {} +}; +MODULE_DEVICE_TABLE(of, sata_dwc_match); + +static struct platform_driver sata_dwc_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = sata_dwc_match, + }, + .probe = sata_dwc_probe, + .remove = sata_dwc_remove, +}; + +module_platform_driver(sata_dwc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>"); +MODULE_DESCRIPTION("DesignWare Cores SATA controller driver"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1eca7b9760e..d761ad3ba09 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -195,6 +195,23 @@ config AMCC_PPC440SPE_ADMA help Enable support for the AMCC PPC440SPe RAID engines. +config AMCC_PPC460EX_460GT_4CHAN_DMA + tristate "AMCC PPC460EX PPC460GT PLB DMA support" + depends on 460EX || 460GT || APM821xx + select DMA_ENGINE + default y + +config APM82181_ADMA + tristate "APM82181 Asynchonous DMA support" + depends on APM821xx + select ASYNC_CORE + select ASYNC_TX_DMA + select DMA_ENGINE + select ARCH_HAS_ASYNC_TX_FIND_CHANNEL + default y + ---help--- + Enable support for the APM82181 Asynchonous DMA engines. + config TIMB_DMA tristate "Timberdale FPGA DMA support" depends on MFD_TIMBERDALE diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index c779e1eb2db..7acb4c437fa 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -21,7 +21,7 @@ obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o obj-$(CONFIG_SH_DMAE_BASE) += sh/ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o -obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ +obj-y += ppc4xx/ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o obj-$(CONFIG_IMX_DMA) += imx-dma.o obj-$(CONFIG_MXS_DMA) += mxs-dma.o diff --git a/drivers/dma/ppc4xx/Makefile b/drivers/dma/ppc4xx/Makefile index b3d259b3e52..e7700985371 100644 --- a/drivers/dma/ppc4xx/Makefile +++ b/drivers/dma/ppc4xx/Makefile @@ -1 +1,3 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += adma.o +obj-$(CONFIG_AMCC_PPC460EX_460GT_4CHAN_DMA) += ppc460ex_4chan_dma.o +obj-$(CONFIG_APM82181_ADMA) += apm82181-adma.o diff --git a/drivers/dma/ppc4xx/apm82181-adma.c b/drivers/dma/ppc4xx/apm82181-adma.c new file mode 100644 index 00000000000..c95e704b1d9 --- /dev/null +++ b/drivers/dma/ppc4xx/apm82181-adma.c @@ -0,0 +1,2201 @@ +/* + * Copyright(c) 2010 Applied Micro Circuits Corporation(AMCC). All rights reserved. 
+ *
+ * Author: Tai Tri Nguyen <ttnguyen@appliedmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This driver supports the asynchronous DMA copy and RAID engines available
+ * on the AppliedMicro APM82181 Processor.
+ * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
+ * ADMA driver written by D.Williams.
+ */
+#define ADMA_DEBUG
+#undef ADMA_DEBUG
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/apm82181-adma.h>
+#include "../dmaengine.h"
+
+#define PPC4XX_EDMA "apm82181-adma: "
+#ifdef ADMA_DEBUG
+#define DBG(string, args...) \
+ printk(PPC4XX_EDMA string ,##args)
+#define INFO DBG("<%s> -- line %d\n",__func__,__LINE__);
+#define ADMA_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false);
+#else
+#define DBG(string, args...)
\ + {if (0) printk(KERN_INFO PPC4XX_EDMA string ,##args); 0; } +#define INFO DBG(""); +#define ADMA_HEXDUMP(b, l) \ + {if (0) print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \ + 8, 1, (b), (l), false); 0;} +#endif + +#define MEM_HEXDUMP(b, l) \ + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \ + 16, 1, (b), (l), false); + +/* The list of channels exported by apm82181 ADMA */ +struct list_head +ppc_adma_chan_list = LIST_HEAD_INIT(ppc_adma_chan_list); + +/* This flag is set when want to refetch the xor chain in the interrupt + * handler + */ +static u32 do_xor_refetch = 0; + +/* Pointers to last submitted to DMA0/1/2/3 and XOR CDBs */ +static apm82181_desc_t *chan_last_sub[5]; +static apm82181_desc_t *chan_first_cdb[5]; + +/* Pointer to last linked and submitted xor CB */ +static apm82181_desc_t *xor_last_linked = NULL; +static apm82181_desc_t *xor_last_submit = NULL; + +/* /proc interface is used here to verify the h/w RAID 5 capabilities + */ +static struct proc_dir_entry *apm82181_proot; + +/* These are used in enable & check routines + */ +static u32 apm82181_xor_verified; +static u32 apm82181_memcpy_verified[4]; +static apm82181_ch_t *apm82181_dma_tchan[5]; +static struct completion apm82181_r5_test_comp; + +static inline int apm82181_chan_is_busy(apm82181_ch_t *chan); +#if 0 +static phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size) +{ + phys_addr_t page_4gb = 0; + + return (page_4gb | addr); +} +#endif +/** + * apm82181_adma_device_estimate - estimate the efficiency of processing + * the operation given on this channel. It's assumed that 'chan' is + * capable to process 'cap' type of operation. + * @chan: channel to use + * @cap: type of transaction + * @src_lst: array of source pointers + * @src_cnt: number of source operands + * @src_sz: size of each source operand + */ +int apm82181_adma_estimate (struct dma_chan *chan, + enum dma_transaction_type cap, struct page **src_lst, + int src_cnt, size_t src_sz) +{ + int ef = 1; + + /* channel idleness increases the priority */ + if (likely(ef) && + !apm82181_chan_is_busy(to_apm82181_adma_chan(chan))) + ef++; + else { + if(chan->chan_id !=APM82181_XOR_ID) + ef = -1; + } + return ef; +} + +/****************************************************************************** + * Command (Descriptor) Blocks low-level routines + ******************************************************************************/ +/** + * apm82181_desc_init_interrupt - initialize the descriptor for INTERRUPT + * pseudo operation + */ +static inline void apm82181_desc_init_interrupt (apm82181_desc_t *desc, + apm82181_ch_t *chan) +{ + xor_cb_t *p; + + switch (chan->device->id) { + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + BUG(); + break; + case APM82181_XOR_ID: + p = desc->hw_desc; + memset (desc->hw_desc, 0, sizeof(xor_cb_t)); + /* NOP with Command Block Complete Enable */ + p->cbc = XOR_CBCR_CBCE_BIT; + break; + default: + printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id, + __FUNCTION__); + break; + } +} + +/** + * apm82181_desc_init_xor - initialize the descriptor for XOR operation + */ +static inline void apm82181_desc_init_xor(apm82181_desc_t *desc, int src_cnt, + unsigned long flags) +{ + xor_cb_t *hw_desc = desc->hw_desc; + + memset (desc->hw_desc, 0, sizeof(xor_cb_t)); + desc->hw_next = NULL; + desc->src_cnt = src_cnt; + desc->dst_cnt = 1; + + hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt; + if (flags & DMA_PREP_INTERRUPT) + /* Enable interrupt on complete */ + hw_desc->cbc 
|= XOR_CBCR_CBCE_BIT;
+}
+
+/**
+ * apm82181_desc_init_memcpy - initialize the descriptor for MEMCPY operation
+ */
+static inline void apm82181_desc_init_memcpy(apm82181_desc_t *desc,
+ unsigned long flags)
+{
+ dma_cdb_t *hw_desc = desc->hw_desc;
+
+ memset(hw_desc, 0, sizeof(dma_cdb_t));
+ desc->hw_next = NULL;
+ desc->src_cnt = 1;
+ desc->dst_cnt = 1;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(APM82181_DESC_INT, &desc->flags);
+ else
+ clear_bit(APM82181_DESC_INT, &desc->flags);
+ /* dma configuration for running */
+ hw_desc->ctrl.tm = 2; /* soft init mem-mem mode */
+ hw_desc->ctrl.pw = 4; /* transfer width 128 bytes */
+ hw_desc->ctrl.ben = 1;/* buffer enable */
+ hw_desc->ctrl.sai = 1;/* increase source addr */
+ hw_desc->ctrl.dai = 1;/* increase dest addr */
+ hw_desc->ctrl.tce = 1;/* chan stops when TC is reached */
+ hw_desc->ctrl.cp = 3; /* highest priority */
+ hw_desc->ctrl.sl = 0; /* source is in PLB */
+ hw_desc->ctrl.pl = 0; /* dest is in PLB */
+ hw_desc->cnt.tcie = 0;/* no interrupt on init */
+ hw_desc->cnt.etie = 0; /* no end-of-transfer interrupt */
+ hw_desc->cnt.eie = 1; /* enable error interrupt */
+ hw_desc->cnt.link = 0;/* not link to next cdb */
+ hw_desc->cnt.sgl = 0;
+ hw_desc->ctrl.ce =1; /* enable channel */
+ hw_desc->ctrl.cie =1; /* enable int channel */
+}
+
+/**
+ * apm82181_desc_init_memset - initialize the descriptor for MEMSET operation
+ */
+static inline void apm82181_desc_init_memset(apm82181_desc_t *desc, int value,
+ unsigned long flags)
+{
+ //dma_cdb_t *hw_desc = desc->hw_desc;
+
+ memset (desc->hw_desc, 0, sizeof(dma_cdb_t));
+ desc->hw_next = NULL;
+ desc->src_cnt = 1;
+ desc->dst_cnt = 1;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(APM82181_DESC_INT, &desc->flags);
+ else
+ clear_bit(APM82181_DESC_INT, &desc->flags);
+
+}
+
+
+
+/**
+ * apm82181_desc_set_src_addr - set source address into the descriptor
+ */
+static inline void apm82181_desc_set_src_addr( apm82181_desc_t *desc,
+ apm82181_ch_t *chan, int src_idx, dma_addr_t addr)
+{
+ dma_cdb_t *dma_hw_desc;
+ xor_cb_t *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ dma_hw_desc = desc->hw_desc;
+ dma_hw_desc->src_hi = (u32)(addr >> 32);
+ dma_hw_desc->src_lo = (u32)addr;
+ break;
+ case APM82181_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->ops[src_idx].h = (u32)(addr >>32);
+ xor_hw_desc->ops[src_idx].l = (u32)addr;
+ break;
+ }
+}
+
+static void apm82181_adma_set_src(apm82181_desc_t *sw_desc,
+ dma_addr_t addr, int index)
+{
+ apm82181_ch_t *chan = to_apm82181_adma_chan(sw_desc->async_tx.chan);
+
+ sw_desc = sw_desc->group_head;
+
+ if (likely(sw_desc))
+ apm82181_desc_set_src_addr(sw_desc, chan, index, addr);
+}
+
+/**
+ * apm82181_desc_set_dest_addr - set destination address into the descriptor
+ */
+static inline void apm82181_desc_set_dest_addr(apm82181_desc_t *desc,
+ apm82181_ch_t *chan, dma_addr_t addr, u32 index)
+{
+ dma_cdb_t *dma_hw_desc;
+ xor_cb_t *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ dma_hw_desc = desc->hw_desc;
+ dma_hw_desc->dest_hi = (u32)(addr >> 32);
+ dma_hw_desc->dest_lo = (u32)addr;
+ break;
+ case APM82181_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->cbtah = (u32)(addr >> 32);
+ xor_hw_desc->cbtal |= (u32)addr;
+ break;
+ }
+}
+
+static int plbdma_get_transfer_width(dma_cdb_t *dma_hw_desc)
+{
+ switch (dma_hw_desc->ctrl.pw){
+ case 0:
+
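	/*
	 * Note (added): pw encodes the PLB transfer width as 1 << pw bytes,
	 * so the pw = 4 programmed in apm82181_desc_init_memcpy() above
	 * selects a 16-byte (128-bit) beat rather than the "128 bytes"
	 * mentioned in its comment; apm82181_desc_set_byte_count() divides
	 * the byte count by the value returned here to compute the terminal
	 * count.
	 */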
return 1; /* unit: bytes */ + case 1: + return 2; + case 2: + return 4; + case 3: + return 8; + case 4: + return 16; + } + return 0; +} +/** + * apm82181_desc_set_byte_count - set number of data bytes involved + * into the operation + */ +static inline void apm82181_desc_set_byte_count(apm82181_desc_t *desc, + apm82181_ch_t *chan, size_t byte_count) +{ + dma_cdb_t *dma_hw_desc; + xor_cb_t *xor_hw_desc; + int terminal_cnt, transfer_width = 0; + + DBG("<%s> byte_count %08x\n", __func__,byte_count); + switch (chan->device->id){ + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + dma_hw_desc = desc->hw_desc; + transfer_width = plbdma_get_transfer_width(dma_hw_desc); + terminal_cnt = byte_count/transfer_width; + dma_hw_desc->cnt.tc = terminal_cnt; + break; + case APM82181_XOR_ID: + xor_hw_desc = desc->hw_desc; + xor_hw_desc->cbbc = byte_count; + break; + } +} + +/** + * apm82181_xor_set_link - set link address in xor CB + */ +static inline void apm82181_xor_set_link (apm82181_desc_t *prev_desc, + apm82181_desc_t *next_desc) +{ + xor_cb_t *xor_hw_desc = prev_desc->hw_desc; + + if (unlikely(!next_desc || !(next_desc->phys))) { + printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n", + __func__, next_desc, + next_desc ? next_desc->phys : 0); + BUG(); + } + DBG("<%s>:next_desc->phys %llx\n", __func__,next_desc->phys); + xor_hw_desc->cbs = 0; + xor_hw_desc->cblal = (u32)next_desc->phys; + xor_hw_desc->cblah = (u32)(next_desc->phys >> 32); + xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT; +} + +/** + * apm82181_desc_set_link - set the address of descriptor following this + * descriptor in chain + */ +static inline void apm82181_desc_set_link(apm82181_ch_t *chan, + apm82181_desc_t *prev_desc, apm82181_desc_t *next_desc) +{ + unsigned long flags; + apm82181_desc_t *tail = next_desc; + + if (unlikely(!prev_desc || !next_desc || + (prev_desc->hw_next && prev_desc->hw_next != next_desc))) { + /* If previous next is overwritten something is wrong. + * though we may refetch from append to initiate list + * processing; in this case - it's ok. + */ + printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; " + "prev->hw_next=0x%p\n", __FUNCTION__, prev_desc, + next_desc, prev_desc ? 
prev_desc->hw_next : 0); + BUG(); + } + + local_irq_save(flags); + + /* do s/w chaining both for DMA and XOR descriptors */ + prev_desc->hw_next = next_desc; + + switch (chan->device->id) { + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + break; + case APM82181_XOR_ID: + /* bind descriptor to the chain */ + while (tail->hw_next) + tail = tail->hw_next; + xor_last_linked = tail; + + if (prev_desc == xor_last_submit) + /* do not link to the last submitted CB */ + break; + apm82181_xor_set_link (prev_desc, next_desc); + break; + default: + BUG(); + } + + local_irq_restore(flags); +} + +/** + * apm82181_desc_get_src_addr - extract the source address from the descriptor + */ +static inline u32 apm82181_desc_get_src_addr(apm82181_desc_t *desc, + apm82181_ch_t *chan, int src_idx) +{ + dma_cdb_t *dma_hw_desc; + + dma_hw_desc = desc->hw_desc; + + switch (chan->device->id) { + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + break; + default: + return 0; + } + /* May have 0, 1, 2, or 3 sources */ + return (dma_hw_desc->src_lo); +} + +/** + * apm82181_desc_get_dest_addr - extract the destination address from the + * descriptor + */ +static inline u32 apm82181_desc_get_dest_addr(apm82181_desc_t *desc, + apm82181_ch_t *chan, int idx) +{ + dma_cdb_t *dma_hw_desc; + + dma_hw_desc = desc->hw_desc; + + switch (chan->device->id) { + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + break; + default: + return 0; + } + + /* May have 0, 1, 2, or 3 sources */ + return (dma_hw_desc->dest_lo); +} + +/** + * apm82181_desc_get_byte_count - extract the byte count from the descriptor + */ +static inline u32 apm82181_desc_get_byte_count(apm82181_desc_t *desc, + apm82181_ch_t *chan) +{ + dma_cdb_t *dma_hw_desc; + + dma_hw_desc = desc->hw_desc; + + switch (chan->device->id) { + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + break; + default: + return 0; + } + /* May have 0, 1, 2, or 3 sources */ + //return (dma_hw_desc->cnt); +} + + +/** + * apm82181_desc_get_link - get the address of the descriptor that + * follows this one + */ +static inline u32 apm82181_desc_get_link(apm82181_desc_t *desc, + apm82181_ch_t *chan) +{ + if (!desc->hw_next) + return 0; + + return desc->hw_next->phys; +} + +/** + * apm82181_desc_is_aligned - check alignment + */ +static inline int apm82181_desc_is_aligned(apm82181_desc_t *desc, + int num_slots) +{ + return (desc->idx & (num_slots - 1)) ? 
0 : 1; +} + + + +/****************************************************************************** + * ADMA channel low-level routines + ******************************************************************************/ + +static inline phys_addr_t apm82181_chan_get_current_descriptor(apm82181_ch_t *chan); +static inline void apm82181_chan_append(apm82181_ch_t *chan); + +/* + * apm82181_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine + */ +static inline void apm82181_adma_device_clear_eot_status (apm82181_ch_t *chan) +{ + u32 val ; + int idx = chan->device->id; + volatile xor_regs_t *xor_reg; + INFO; + switch (idx) { + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + val = mfdcr(DCR_DMA2P40_SR); + if(val & DMA_SR_RI(idx)){ + printk(KERN_ERR "Err occurred, DMA%d status: 0x%x\n", idx, val); + } + /* TC reached int, write back to clear */ + mtdcr(DCR_DMA2P40_SR, val); + break; + case APM82181_XOR_ID: + /* reset status bits to ack*/ + xor_reg = chan->device->xor_base; + + val = xor_reg->sr; + DBG("XOR engine status: 0x%08x\n", val); + xor_reg->sr = val; + + if (val & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) { + if (val & XOR_IE_RPTIE_BIT) { + /* Read PLB Timeout Error. + * Try to resubmit the CB + */ + INFO; + xor_reg->cblalr = xor_reg->ccbalr; + xor_reg->crsr |= XOR_CRSR_XAE_BIT; + } else + printk (KERN_ERR "XOR ERR 0x%x status\n", val); + break; + } + + /* if the XORcore is idle, but there are unprocessed CBs + * then refetch the s/w chain here + */ + if (!(xor_reg->sr & XOR_SR_XCP_BIT) && do_xor_refetch) { + apm82181_chan_append(chan); + } + break; + } +} + +/* + * apm82181_chan_is_busy - get the channel status + */ + +static inline int apm82181_chan_is_busy(apm82181_ch_t *chan) +{ + int busy = 0; + volatile xor_regs_t *xor_reg = chan->device->xor_base; + + switch (chan->device->id) { + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + if(mfdcr(DCR_DMA2P40_SR) & DMA_SR_CB(chan->device->id)) + busy = 1; + else + busy = 0; + break; + case APM82181_XOR_ID: + /* use the special status bit for the XORcore + */ + busy = (xor_reg->sr & XOR_SR_XCP_BIT) ? 
1 : 0; + break; + default: + BUG(); + } + + return busy; +} + +/** + * apm82181_dma_put_desc - put PLB DMA 0/1/2/3 descriptor to FIFO + */ +static inline void apm82181_dma_put_desc(apm82181_ch_t *chan, + apm82181_desc_t *desc) +{ + dma_cdb_t *cdb = desc->hw_desc; + u32 sg_cmd = 0; + + /* Enable TC interrupt */ + if(test_bit(APM82181_DESC_INT, &desc->flags)) + cdb->cnt.tcie = 1; + else + cdb->cnt.tcie = 0; + /* Not link to next cdb */ + cdb->sg_hi = 0xffffffff; + cdb->sg_lo = 0xffffffff; + + chan_last_sub[chan->device->id] = desc; + + /* Update new cdb addr */ + mtdcr(DCR_DMA2P40_SGHx(chan->device->id), (u32)(desc->phys >> 32)); + mtdcr(DCR_DMA2P40_SGLx(chan->device->id), (u32)desc->phys); + + INFO; + DBG("slot id: %d addr: %llx\n", desc->idx, desc->phys); + DBG("S/G addr H: %08x addr L: %08x\n", + mfdcr(DCR_DMA2P40_SGHx(chan->device->id)), + mfdcr(DCR_DMA2P40_SGLx(chan->device->id))); + ADMA_HEXDUMP(cdb, 96); + /* Enable S/G */ + sg_cmd |= (DMA_SGC_SSG(chan->device->id) | DMA_SGC_EM_ALL); + sg_cmd |= DMA_SGC_SGL(chan->device->id, 0); /* S/G addr in PLB */ + + mtdcr(DCR_DMA2P40_SGC, sg_cmd); + DBG("S/G addr H: %08x addr L: %08x\n", + mfdcr(DCR_DMA2P40_SGHx(chan->device->id)), + mfdcr(DCR_DMA2P40_SGLx(chan->device->id))); + /* need to use variable for logging current CDB */ + chan->current_cdb_addr = desc->phys; + +} + +/** + * apm82181_chan_append - update the h/w chain in the channel + */ +static inline void apm82181_chan_append(apm82181_ch_t *chan) +{ + apm82181_desc_t *iter; + volatile xor_regs_t *xor_reg; + phys_addr_t cur_desc; + xor_cb_t *xcb; + unsigned long flags; + INFO; + + local_irq_save(flags); + + switch (chan->device->id) { + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + cur_desc = apm82181_chan_get_current_descriptor(chan); + DBG("current_desc %llx\n", cur_desc); + if (likely(cur_desc)) { + INFO; + iter = chan_last_sub[chan->device->id]; + BUG_ON(!iter); + } else { + INFO; + /* first peer */ + iter = chan_first_cdb[chan->device->id]; + BUG_ON(!iter); + INFO; + apm82181_dma_put_desc(chan, iter); + chan->hw_chain_inited = 1; + } + + /* is there something new to append */ + if (!iter->hw_next) + break; + + /* flush descriptors from the s/w queue to fifo */ + list_for_each_entry_continue(iter, &chan->chain, chain_node) { + apm82181_dma_put_desc(chan, iter); + if (!iter->hw_next) + break; + } + break; + case APM82181_XOR_ID: + /* update h/w links and refetch */ + if (!xor_last_submit->hw_next) + break; + xor_reg = chan->device->xor_base; + /* the last linked CDB has to generate an interrupt + * that we'd be able to append the next lists to h/w + * regardless of the XOR engine state at the moment of + * appending of these next lists + */ + xcb = xor_last_linked->hw_desc; + xcb->cbc |= XOR_CBCR_CBCE_BIT; + + if (!(xor_reg->sr & XOR_SR_XCP_BIT)) { + /* XORcore is idle. Refetch now */ + do_xor_refetch = 0; + apm82181_xor_set_link(xor_last_submit, + xor_last_submit->hw_next); + + xor_last_submit = xor_last_linked; + xor_reg->crsr |= XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT; + } else { + /* XORcore is running. 
Refetch later in the handler */ + do_xor_refetch = 1; + } + + break; + } + + local_irq_restore(flags); +} + +/** + * apm82181_chan_get_current_descriptor - get the currently executed descriptor + */ +static inline phys_addr_t apm82181_chan_get_current_descriptor(apm82181_ch_t *chan) +{ + phys_addr_t curr_cdb_addr; + volatile xor_regs_t *xor_reg; + int idx = chan->device->id; + + if (unlikely(!chan->hw_chain_inited)) + /* h/w descriptor chain is not initialized yet */ + return 0; + switch(idx){ + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + curr_cdb_addr = chan->current_cdb_addr; + break; + case APM82181_XOR_ID: + xor_reg = chan->device->xor_base; + curr_cdb_addr = (dma_addr_t)xor_reg->ccbahr; + curr_cdb_addr = (curr_cdb_addr << 32) | xor_reg->ccbalr; + break; + default: + BUG(); + } + return curr_cdb_addr; +} + + +/****************************************************************************** + * ADMA device level + ******************************************************************************/ + +static int apm82181_adma_alloc_chan_resources(struct dma_chan *chan); +static dma_cookie_t apm82181_adma_tx_submit( + struct dma_async_tx_descriptor *tx); + +static void apm82181_adma_set_dest( + apm82181_desc_t *tx, + dma_addr_t addr, int index); + +/** + * apm82181_get_group_entry - get group entry with index idx + * @tdesc: is the last allocated slot in the group. + */ +static inline apm82181_desc_t * +apm82181_get_group_entry ( apm82181_desc_t *tdesc, u32 entry_idx) +{ + apm82181_desc_t *iter = tdesc->group_head; + int i = 0; + + if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) { + printk("%s: entry_idx %d, src_cnt %d, dst_cnt %d\n", + __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt); + BUG(); + } + list_for_each_entry(iter, &tdesc->group_list, chain_node) { + if (i++ == entry_idx) + break; + } + return iter; +} + +/** + * apm82181_adma_free_slots - flags descriptor slots for reuse + * @slot: Slot to free + * Caller must hold &apm82181_chan->lock while calling this function + */ +static void apm82181_adma_free_slots(apm82181_desc_t *slot, + apm82181_ch_t *chan) +{ + int stride = slot->slots_per_op; + + while (stride--) { + /*async_tx_clear_ack(&slot->async_tx);*/ /* Don't need to clear. It is hack*/ + slot->slots_per_op = 0; + slot = list_entry(slot->slot_node.next, + apm82181_desc_t, + slot_node); + } +} + +static void +apm82181_adma_unmap(apm82181_ch_t *chan, apm82181_desc_t *desc) +{ + u32 src_cnt, dst_cnt; + dma_addr_t addr; + /* + * get the number of sources & destination + * included in this descriptor and unmap + * them all + */ + src_cnt = 1; + dst_cnt = 1; +} +/** + * apm82181_adma_run_tx_complete_actions - call functions to be called + * upon complete + */ +static dma_cookie_t apm82181_adma_run_tx_complete_actions( + apm82181_desc_t *desc, + apm82181_ch_t *chan, + dma_cookie_t cookie) +{ + int i; + //enum dma_data_direction dir; + INFO; + BUG_ON(desc->async_tx.cookie < 0); + if (desc->async_tx.cookie > 0) { + cookie = desc->async_tx.cookie; + desc->async_tx.cookie = 0; + + /* call the callback (must not sleep or submit new + * operations to this channel) + */ + if (desc->async_tx.callback) + desc->async_tx.callback( + desc->async_tx.callback_param); + + dma_descriptor_unmap(&desc->async_tx); + /* unmap dma addresses + * (unmap_single vs unmap_page?) 
+ * + * actually, ppc's dma_unmap_page() functions are empty, so + * the following code is just for the sake of completeness + */ + if (chan && chan->needs_unmap && desc->group_head && + desc->unmap_len) { + apm82181_desc_t *unmap = desc->group_head; + /* assume 1 slot per op always */ + u32 slot_count = unmap->slot_cnt; + + /* Run through the group list and unmap addresses */ + for (i = 0; i < slot_count; i++) { + BUG_ON(!unmap); + apm82181_adma_unmap(chan, unmap); + unmap = unmap->hw_next; + } + desc->group_head = NULL; + } + } + + /* run dependent operations */ + dma_run_dependencies(&desc->async_tx); + + return cookie; +} + +/** + * apm82181_adma_clean_slot - clean up CDB slot (if ack is set) + */ +static int apm82181_adma_clean_slot(apm82181_desc_t *desc, + apm82181_ch_t *chan) +{ + /* the client is allowed to attach dependent operations + * until 'ack' is set + */ + if (!async_tx_test_ack(&desc->async_tx)) + return 0; + + /* leave the last descriptor in the chain + * so we can append to it + */ + if (list_is_last(&desc->chain_node, &chan->chain) || + desc->phys == apm82181_chan_get_current_descriptor(chan)) + return 1; + + dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n", + desc->phys, desc->idx, desc->slots_per_op); + + list_del(&desc->chain_node); + apm82181_adma_free_slots(desc, chan); + return 0; +} + +/** + * __apm82181_adma_slot_cleanup - this is the common clean-up routine + * which runs through the channel CDBs list until reach the descriptor + * currently processed. When routine determines that all CDBs of group + * are completed then corresponding callbacks (if any) are called and slots + * are freed. + */ +static void __apm82181_adma_slot_cleanup(apm82181_ch_t *chan) +{ + apm82181_desc_t *iter, *_iter, *group_start = NULL; + dma_cookie_t cookie = 0; + phys_addr_t current_desc = apm82181_chan_get_current_descriptor(chan); + int busy = apm82181_chan_is_busy(chan); + int seen_current = 0, slot_cnt = 0, slots_per_op = 0; + + DBG("apm82181 adma%d: %s\n", + chan->device->id, __FUNCTION__); + DBG("current_desc %llx\n", current_desc); + + if (!current_desc) { + /* There were no transactions yet, so + * nothing to clean + */ + return; + } + + /* free completed slots from the chain starting with + * the oldest descriptor + */ + list_for_each_entry_safe(iter, _iter, &chan->chain, + chain_node) { + DBG(" cookie: %d slot: %d " + "busy: %d this_desc: %llx next_desc: %x cur: %llx ack: %d\n", + iter->async_tx.cookie, iter->idx, busy, iter->phys, + apm82181_desc_get_link(iter, chan), current_desc, + async_tx_test_ack(&iter->async_tx)); + prefetch(_iter); + prefetch(&_iter->async_tx); + + /* do not advance past the current descriptor loaded into the + * hardware channel,subsequent descriptors are either in process + * or have not been submitted + */ + if (seen_current) + break; + + /* stop the search if we reach the current descriptor and the + * channel is busy, or if it appears that the current descriptor + * needs to be re-read (i.e. has been appended to) + */ + if (iter->phys == current_desc) { + BUG_ON(seen_current++); + if (busy || apm82181_desc_get_link(iter, chan)) { + /* not all descriptors of the group have + * been completed; exit. 
+ */ + break; + } + } + + /* detect the start of a group transaction */ + if (!slot_cnt && !slots_per_op) { + slot_cnt = iter->slot_cnt; + slots_per_op = iter->slots_per_op; + if (slot_cnt <= slots_per_op) { + slot_cnt = 0; + slots_per_op = 0; + } + } + + if (slot_cnt) { + if (!group_start) + group_start = iter; + slot_cnt -= slots_per_op; + } + + /* all the members of a group are complete */ + if (slots_per_op != 0 && slot_cnt == 0) { + apm82181_desc_t *grp_iter, *_grp_iter; + int end_of_chain = 0; + + /* clean up the group */ + slot_cnt = group_start->slot_cnt; + grp_iter = group_start; + list_for_each_entry_safe_from(grp_iter, _grp_iter, + &chan->chain, chain_node) { + + cookie = apm82181_adma_run_tx_complete_actions( + grp_iter, chan, cookie); + + slot_cnt -= slots_per_op; + end_of_chain = apm82181_adma_clean_slot( + grp_iter, chan); + if (end_of_chain && slot_cnt) { + /* Should wait for ZeroSum complete */ + if (cookie > 0) + chan->common.completed_cookie = cookie; + return; + } + + if (slot_cnt == 0 || end_of_chain) + break; + } + + /* the group should be complete at this point */ + BUG_ON(slot_cnt); + + slots_per_op = 0; + group_start = NULL; + if (end_of_chain) + break; + else + continue; + } else if (slots_per_op) /* wait for group completion */ + continue; + + cookie = apm82181_adma_run_tx_complete_actions(iter, chan, + cookie); + + if (apm82181_adma_clean_slot(iter, chan)) + break; + } + + BUG_ON(!seen_current); + + if (cookie > 0) { + chan->common.completed_cookie = cookie; + DBG("completed cookie %d\n", cookie); + } + +} + +/** + * apm82181_adma_tasklet - clean up watch-dog initiator + */ +static void apm82181_adma_tasklet (unsigned long data) +{ + apm82181_ch_t *chan = (apm82181_ch_t *) data; + spin_lock(&chan->lock); + INFO; + __apm82181_adma_slot_cleanup(chan); + spin_unlock(&chan->lock); +} + +/** + * apm82181_adma_slot_cleanup - clean up scheduled initiator + */ +static void apm82181_adma_slot_cleanup (apm82181_ch_t *chan) +{ + spin_lock_bh(&chan->lock); + __apm82181_adma_slot_cleanup(chan); + spin_unlock_bh(&chan->lock); +} + +/** + * apm82181_adma_alloc_slots - allocate free slots (if any) + */ +static apm82181_desc_t *apm82181_adma_alloc_slots( + apm82181_ch_t *chan, int num_slots, + int slots_per_op) +{ + apm82181_desc_t *iter = NULL, *_iter, *alloc_start = NULL; + struct list_head chain = LIST_HEAD_INIT(chain); + int slots_found, retry = 0; + + + BUG_ON(!num_slots || !slots_per_op); + /* start search from the last allocated descrtiptor + * if a contiguous allocation can not be found start searching + * from the beginning of the list + */ +retry: + slots_found = 0; + if (retry == 0) + iter = chan->last_used; + else + iter = list_entry(&chan->all_slots, apm82181_desc_t, + slot_node); + prefetch(iter); + DBG("---iter at %p idx %d\n ",iter,iter->idx); + list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots, + slot_node) { + prefetch(_iter); + prefetch(&_iter->async_tx); + if (iter->slots_per_op) { + slots_found = 0; + continue; + } + + /* start the allocation if the slot is correctly aligned */ + if (!slots_found++) + alloc_start = iter; + if (slots_found == num_slots) { + apm82181_desc_t *alloc_tail = NULL; + apm82181_desc_t *last_used = NULL; + iter = alloc_start; + while (num_slots) { + int i; + + /* pre-ack all but the last descriptor */ + if (num_slots != slots_per_op) { + async_tx_ack(&iter->async_tx); + } + list_add_tail(&iter->chain_node, &chain); + alloc_tail = iter; + iter->async_tx.cookie = 0; + iter->hw_next = NULL; + iter->flags = 0; + 
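			/*
			 * Note (added): each descriptor in a group records how
			 * many slots the group still owns from this point on
			 * (slot_cnt) and a per-operation countdown
			 * (slots_per_op down to 1); __apm82181_adma_slot_cleanup()
			 * relies on exactly these two fields to detect operation
			 * and group boundaries.
			 */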
iter->slot_cnt = num_slots; + for (i = 0; i < slots_per_op; i++) { + iter->slots_per_op = slots_per_op - i; + last_used = iter; + iter = list_entry(iter->slot_node.next, + apm82181_desc_t, + slot_node); + } + num_slots -= slots_per_op; + } + alloc_tail->group_head = alloc_start; + alloc_tail->async_tx.cookie = -EBUSY; + list_splice(&chain, &alloc_tail->group_list); + chan->last_used = last_used; + DBG("---slot allocated at %llx idx %d, hw_desc %p tx_ack %d\n", + alloc_tail->phys, alloc_tail->idx, alloc_tail->hw_desc, + async_tx_test_ack(&alloc_tail->async_tx)); + return alloc_tail; + } + } + if (!retry++) + goto retry; +#ifdef ADMA_DEBUG + static int empty_slot_cnt; + if(!(empty_slot_cnt%100)) + printk(KERN_INFO"No empty slots trying to free some\n"); + empty_slot_cnt++; +#endif + /* try to free some slots if the allocation fails */ + tasklet_schedule(&chan->irq_tasklet); + return NULL; +} + +/** + * apm82181_chan_xor_slot_count - get the number of slots necessary for + * XOR operation + */ +static inline int apm82181_chan_xor_slot_count(size_t len, int src_cnt, + int *slots_per_op) +{ + int slot_cnt; + + /* each XOR descriptor provides up to 16 source operands */ + slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS; + + if (likely(len <= APM82181_ADMA_XOR_MAX_BYTE_COUNT)) + return slot_cnt; + + printk(KERN_ERR "%s: len %d > max %d !!\n", + __func__, len, APM82181_ADMA_XOR_MAX_BYTE_COUNT); + BUG(); + return slot_cnt; +} + +/** + * apm82181_desc_init_null_xor - initialize the descriptor for NULL XOR + * pseudo operation + */ +static inline void apm82181_desc_init_null_xor(apm82181_desc_t *desc) +{ + memset (desc->hw_desc, 0, sizeof(xor_cb_t)); + desc->hw_next = NULL; + desc->src_cnt = 0; + desc->dst_cnt = 1; +} +/** + * apm82181_chan_set_first_xor_descriptor - initi XORcore chain + */ +static inline void apm82181_chan_set_first_xor_descriptor(apm82181_ch_t *chan, + apm82181_desc_t *next_desc) +{ + volatile xor_regs_t *xor_reg; + + xor_reg = chan->device->xor_base; + + if (xor_reg->sr & XOR_SR_XCP_BIT) + printk(KERN_INFO "%s: Warn: XORcore is running " + "when try to set the first CDB!\n", + __func__); + + xor_last_submit = xor_last_linked = next_desc; + + xor_reg->crsr = XOR_CRSR_64BA_BIT; + + xor_reg->cblalr = next_desc->phys; + xor_reg->cblahr = 0; + xor_reg->cbcr |= XOR_CBCR_LNK_BIT; + + chan->hw_chain_inited = 1; +} +/** + * apm82181_chan_start_null_xor - initiate the first XOR operation (DMA engines + * use FIFOs (as opposite to chains used in XOR) so this is a XOR + * specific operation) + */ +static void apm82181_chan_start_null_xor(apm82181_ch_t *chan) +{ + apm82181_desc_t *sw_desc, *group_start; + dma_cookie_t cookie; + int slot_cnt, slots_per_op; + volatile xor_regs_t *xor_reg = chan->device->xor_base; + + dev_dbg(chan->device->common.dev, + "apm82181 adma%d: %s\n", chan->device->id, __func__); + INFO; + spin_lock_bh(&chan->lock); + slot_cnt = apm82181_chan_xor_slot_count(0, 2, &slots_per_op); + sw_desc = apm82181_adma_alloc_slots(chan, slot_cnt, slots_per_op); + if (sw_desc) { + INFO; + group_start = sw_desc->group_head; + list_splice_init(&sw_desc->group_list, &chan->chain); + async_tx_ack(&sw_desc->async_tx); + apm82181_desc_init_null_xor(group_start); + INFO; + + cookie = chan->common.cookie; + cookie++; + if (cookie <= 1) + cookie = 2; + + /* initialize the completed cookie to be less than + * the most recently used cookie + */ + chan->common.completed_cookie = cookie - 1; + chan->common.cookie = sw_desc->async_tx.cookie = cookie; + + /* channel should not be 
busy */ + BUG_ON(apm82181_chan_is_busy(chan)); + + /* set the descriptor address */ + apm82181_chan_set_first_xor_descriptor(chan, sw_desc); + + /* run the descriptor */ + xor_reg->crsr = XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT; + } else + printk(KERN_ERR "apm82181 adma%d" + " failed to allocate null descriptor\n", + chan->device->id); + spin_unlock_bh(&chan->lock); +} + +/** + * apm82181_adma_alloc_chan_resources - allocate pools for CDB slots + */ +static int apm82181_adma_alloc_chan_resources(struct dma_chan *chan) +{ + apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan); + apm82181_desc_t *slot = NULL; + char *hw_desc; + int i, db_sz; + int init = apm82181_chan->slots_allocated ? 0 : 1; + + chan->chan_id = apm82181_chan->device->id; + + /* Allocate descriptor slots */ + i = apm82181_chan->slots_allocated; + if (apm82181_chan->device->id != APM82181_XOR_ID) + db_sz = sizeof (dma_cdb_t); + else + db_sz = sizeof (xor_cb_t); + + for (; i < (apm82181_chan->device->pool_size/db_sz); i++) { + slot = kzalloc(sizeof(apm82181_desc_t), GFP_KERNEL); + if (!slot) { + printk(KERN_INFO "APM82181/GT ADMA Channel only initialized" + " %d descriptor slots", i--); + break; + } + + hw_desc = (char *) apm82181_chan->device->dma_desc_pool_virt; + slot->hw_desc = (void *) &hw_desc[i * db_sz]; + dma_async_tx_descriptor_init(&slot->async_tx, chan); + slot->async_tx.tx_submit = apm82181_adma_tx_submit; + INIT_LIST_HEAD(&slot->chain_node); + INIT_LIST_HEAD(&slot->slot_node); + INIT_LIST_HEAD(&slot->group_list); + slot->phys = apm82181_chan->device->dma_desc_pool + i * db_sz; + slot->idx = i; + spin_lock_bh(&apm82181_chan->lock); + apm82181_chan->slots_allocated++; + list_add_tail(&slot->slot_node, &apm82181_chan->all_slots); + spin_unlock_bh(&apm82181_chan->lock); + } + + if (i && !apm82181_chan->last_used) { + apm82181_chan->last_used = + list_entry(apm82181_chan->all_slots.next, + apm82181_desc_t, + slot_node); + } + + printk("apm82181 adma%d: allocated %d descriptor slots\n", + apm82181_chan->device->id, i); + + /* initialize the channel and the chain with a null operation */ + if (init) { + switch (apm82181_chan->device->id) + { + apm82181_chan->hw_chain_inited = 0; + case APM82181_PDMA0_ID: + apm82181_dma_tchan[0] = apm82181_chan; + break; + case APM82181_PDMA1_ID: + apm82181_dma_tchan[1] = apm82181_chan; + break; + case APM82181_PDMA2_ID: + apm82181_dma_tchan[2] = apm82181_chan; + break; + case APM82181_PDMA3_ID: + apm82181_dma_tchan[3] = apm82181_chan; + break; + case APM82181_XOR_ID: + apm82181_dma_tchan[4] = apm82181_chan; + apm82181_chan_start_null_xor(apm82181_chan); + break; + default: + BUG(); + } + apm82181_chan->needs_unmap = 1; + } + + return (i > 0) ? 
i : -ENOMEM; +} + +/** + * apm82181_desc_assign_cookie - assign a cookie + */ +static dma_cookie_t apm82181_desc_assign_cookie(apm82181_ch_t *chan, + apm82181_desc_t *desc) +{ + dma_cookie_t cookie = chan->common.cookie; + cookie++; + if (cookie < 0) + cookie = 1; + chan->common.cookie = desc->async_tx.cookie = cookie; + return cookie; +} + + +/** + * apm82181_adma_check_threshold - append CDBs to h/w chain if threshold + * has been achieved + */ +static void apm82181_adma_check_threshold(apm82181_ch_t *chan) +{ + dev_dbg(chan->device->common.dev, "apm82181 adma%d: pending: %d\n", + chan->device->id, chan->pending); + INFO; + if (chan->pending >= APM82181_ADMA_THRESHOLD) { + chan->pending = 0; + apm82181_chan_append(chan); + } +} + +/** + * apm82181_adma_tx_submit - submit new descriptor group to the channel + * (it's not necessary that descriptors will be submitted to the h/w + * chains too right now) + */ +static dma_cookie_t apm82181_adma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + apm82181_desc_t *sw_desc = tx_to_apm82181_adma_slot(tx); + apm82181_ch_t *chan = to_apm82181_adma_chan(tx->chan); + apm82181_desc_t *group_start, *old_chain_tail; + int slot_cnt; + int slots_per_op; + dma_cookie_t cookie; + group_start = sw_desc->group_head; + slot_cnt = group_start->slot_cnt; + slots_per_op = group_start->slots_per_op; + INFO; + spin_lock_bh(&chan->lock); + cookie = apm82181_desc_assign_cookie(chan, sw_desc); + + if (unlikely(list_empty(&chan->chain))) { + /* first peer */ + list_splice_init(&sw_desc->group_list, &chan->chain); + chan_first_cdb[chan->device->id] = group_start; + } else { + /* isn't first peer, bind CDBs to chain */ + old_chain_tail = list_entry(chan->chain.prev, + apm82181_desc_t, chain_node); + list_splice_init(&sw_desc->group_list, + &old_chain_tail->chain_node); + /* fix up the hardware chain */ + apm82181_desc_set_link(chan, old_chain_tail, group_start); + } + + /* increment the pending count by the number of operations */ + chan->pending += slot_cnt / slots_per_op; + apm82181_adma_check_threshold(chan); + spin_unlock_bh(&chan->lock); + + DBG("apm82181 adma%d:cookie: %d slot: %d tx %p\n", + chan->device->id, sw_desc->async_tx.cookie, sw_desc->idx, sw_desc); + return cookie; +} +/** + * apm82181_adma_prep_dma_xor - prepare CDB for a XOR operation + */ +static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_xor( + struct dma_chan *chan, dma_addr_t dma_dest, + dma_addr_t *dma_src, unsigned int src_cnt, size_t len, + unsigned long flags) +{ + apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan); + apm82181_desc_t *sw_desc, *group_start; + int slot_cnt, slots_per_op; + +#ifdef ADMA_DEBUG + printk("\n%s(%d):\n\tsrc: ", __func__, + apm82181_chan->device->id); + for (slot_cnt=0; slot_cnt < src_cnt; slot_cnt++) + printk("0x%llx ", dma_src[slot_cnt]); + printk("\n\tdst: 0x%llx\n", dma_dest); +#endif + if (unlikely(!len)) + return NULL; + BUG_ON(unlikely(len > APM82181_ADMA_XOR_MAX_BYTE_COUNT)); + + dev_dbg(apm82181_chan->device->common.dev, + "apm82181 adma%d: %s src_cnt: %d len: %u int_en: %d\n", + apm82181_chan->device->id, __func__, src_cnt, len, + flags & DMA_PREP_INTERRUPT ? 
1 : 0); + + spin_lock_bh(&apm82181_chan->lock); + slot_cnt = apm82181_chan_xor_slot_count(len, src_cnt, &slots_per_op); + sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt, + slots_per_op); + if (sw_desc) { + group_start = sw_desc->group_head; + apm82181_desc_init_xor(group_start, src_cnt, flags); + apm82181_adma_set_dest(group_start, dma_dest, 0); + while (src_cnt--) + apm82181_adma_set_src(group_start, + dma_src[src_cnt], src_cnt); + apm82181_desc_set_byte_count(group_start, apm82181_chan, len); + sw_desc->unmap_len = len; + sw_desc->async_tx.flags = flags; + } + spin_unlock_bh(&apm82181_chan->lock); + + return sw_desc ? &sw_desc->async_tx : NULL; +} +/** + * apm82181_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation + */ +static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_interrupt( + struct dma_chan *chan, unsigned long flags) +{ + apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan); + apm82181_desc_t *sw_desc, *group_start; + int slot_cnt, slots_per_op; + + dev_dbg(apm82181_chan->device->common.dev, + "apm82181 adma%d: %s\n", apm82181_chan->device->id, + __FUNCTION__); + spin_lock_bh(&apm82181_chan->lock); + slot_cnt = slots_per_op = 1; + sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt, + slots_per_op); + if (sw_desc) { + group_start = sw_desc->group_head; + apm82181_desc_init_interrupt(group_start, apm82181_chan); + group_start->unmap_len = 0; + sw_desc->async_tx.flags = flags; + } + spin_unlock_bh(&apm82181_chan->lock); + + return sw_desc ? &sw_desc->async_tx : NULL; +} + +/** + * apm82181_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation + */ +static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan); + apm82181_desc_t *sw_desc, *group_start; + int slot_cnt, slots_per_op; + if (unlikely(!len)) + return NULL; + BUG_ON(unlikely(len > APM82181_ADMA_DMA_MAX_BYTE_COUNT)); + + spin_lock_bh(&apm82181_chan->lock); + + dev_dbg(apm82181_chan->device->common.dev, + "apm82181 adma%d: %s len: %u int_en %d \n", + apm82181_chan->device->id, __FUNCTION__, len, + flags & DMA_PREP_INTERRUPT ? 1 : 0); + + slot_cnt = slots_per_op = 1; + sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt, + slots_per_op); + if (sw_desc) { + group_start = sw_desc->group_head; + flags |= DMA_PREP_INTERRUPT; + apm82181_desc_init_memcpy(group_start, flags); + apm82181_adma_set_dest(group_start, dma_dest, 0); + apm82181_adma_set_src(group_start, dma_src, 0); + apm82181_desc_set_byte_count(group_start, apm82181_chan, len); + sw_desc->unmap_len = len; + sw_desc->async_tx.flags = flags; + } + spin_unlock_bh(&apm82181_chan->lock); + return sw_desc ? 
&sw_desc->async_tx : NULL; +} + + +/** + * apm82181_adma_set_dest - set destination address into descriptor + */ +static void apm82181_adma_set_dest(apm82181_desc_t *sw_desc, + dma_addr_t addr, int index) +{ + apm82181_ch_t *chan = to_apm82181_adma_chan(sw_desc->async_tx.chan); + BUG_ON(index >= sw_desc->dst_cnt); + + switch (chan->device->id) { + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + /* to do: support transfers lengths > + * APM82181_ADMA_DMA/XOR_MAX_BYTE_COUNT + */ + apm82181_desc_set_dest_addr(sw_desc->group_head, + // chan, 0x8, addr, index); // Enabling HB bus + chan, addr, index); + break; + case APM82181_XOR_ID: + sw_desc = apm82181_get_group_entry(sw_desc, index); + apm82181_desc_set_dest_addr(sw_desc, chan, + addr, index); + break; + default: + BUG(); + } +} + + +/** + * apm82181_adma_free_chan_resources - free the resources allocated + */ +static void apm82181_adma_free_chan_resources(struct dma_chan *chan) +{ + apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan); + apm82181_desc_t *iter, *_iter; + int in_use_descs = 0; + + apm82181_adma_slot_cleanup(apm82181_chan); + + spin_lock_bh(&apm82181_chan->lock); + list_for_each_entry_safe(iter, _iter, &apm82181_chan->chain, + chain_node) { + in_use_descs++; + list_del(&iter->chain_node); + } + list_for_each_entry_safe_reverse(iter, _iter, + &apm82181_chan->all_slots, slot_node) { + list_del(&iter->slot_node); + kfree(iter); + apm82181_chan->slots_allocated--; + } + apm82181_chan->last_used = NULL; + + dev_dbg(apm82181_chan->device->common.dev, + "apm82181 adma%d %s slots_allocated %d\n", + apm82181_chan->device->id, + __FUNCTION__, apm82181_chan->slots_allocated); + spin_unlock_bh(&apm82181_chan->lock); + + /* one is ok since we left it on there on purpose */ + if (in_use_descs > 1) + printk(KERN_ERR "GT: Freeing %d in use descriptors!\n", + in_use_descs - 1); +} + +/** + * apm82181_adma_tx_status - poll the status of an ADMA transaction + * @chan: ADMA channel handle + * @cookie: ADMA transaction identifier + */ +static enum dma_status apm82181_adma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan); + enum dma_status ret; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + apm82181_adma_slot_cleanup(apm82181_chan); + + return dma_cookie_status(chan, cookie, txstate); +} + +/** + * apm82181_adma_eot_handler - end of transfer interrupt handler + */ +static irqreturn_t apm82181_adma_eot_handler(int irq, void *data) +{ + apm82181_ch_t *chan = data; + + dev_dbg(chan->device->common.dev, + "apm82181 adma%d: %s\n", chan->device->id, __FUNCTION__); + INFO; + if(chan->device->id == APM82181_XOR_ID) + tasklet_schedule(&chan->irq_tasklet); + apm82181_adma_device_clear_eot_status(chan); + + return IRQ_HANDLED; +} + +/** + * apm82181_adma_err_handler - DMA error interrupt handler; + * do the same things as a eot handler + */ +#if 0 +static irqreturn_t apm82181_adma_err_handler(int irq, void *data) +{ + apm82181_ch_t *chan = data; + dev_dbg(chan->device->common.dev, + "apm82181 adma%d: %s\n", chan->device->id, __FUNCTION__); + tasklet_schedule(&chan->irq_tasklet); + apm82181_adma_device_clear_eot_status(chan); + + return IRQ_HANDLED; +} +#endif +/** + * apm82181_test_callback - called when test operation has been done + */ +static void apm82181_test_callback (void *unused) +{ + complete(&apm82181_r5_test_comp); +} + +/** + * 
apm82181_adma_issue_pending - flush all pending descriptors to h/w + */ +static void apm82181_adma_issue_pending(struct dma_chan *chan) +{ + apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan); + + DBG("apm82181 adma%d: %s %d \n", apm82181_chan->device->id, + __FUNCTION__, apm82181_chan->pending); + if (apm82181_chan->pending) { + apm82181_chan->pending = 0; + apm82181_chan_append(apm82181_chan); + } +} + +static inline void xor_hw_init (apm82181_dev_t *adev) +{ + volatile xor_regs_t *xor_reg = adev->xor_base; + /* Reset XOR */ + xor_reg->crsr = XOR_CRSR_XASR_BIT; + xor_reg->crrr = XOR_CRSR_64BA_BIT; + + /* enable XOR engine interrupts */ + xor_reg->ier = XOR_IE_CBCIE_BIT | + XOR_IE_ICBIE_BIT | XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT; +} + +/* + * Per channel probe + */ +static int apm82181_dma_per_chan_probe(struct platform_device *ofdev) +{ + int ret = 0, irq; + const u32 *index, *dcr_regs, *pool_size; + apm82181_plb_dma_t *pdma; + apm82181_dev_t *adev; + apm82181_ch_t *chan; + struct device_node *np = ofdev->dev.of_node; + struct resource res; + int len; + + INFO; + pdma = dev_get_drvdata(ofdev->dev.parent); + BUG_ON(!pdma); + if ((adev = kzalloc(sizeof(*adev), GFP_KERNEL)) == NULL) { + printk("ERROR:No Free memory for allocating dma channels\n"); + ret = -ENOMEM; + goto out; + } + adev->dev = &ofdev->dev; + index = of_get_property(np, "cell-index", NULL); + if(!index) { + printk(KERN_ERR "adma-channel: Device node %s has missing or invalid " + "cell-index property\n", np->full_name); + goto err; + } + adev->id = (int)*index; + /* The XOR engine/PLB DMA 4 channels have different resources/pool_sizes */ + if (adev->id != APM82181_XOR_ID){ + dcr_regs = of_get_property(np, "dcr-reg", &len); + if (!dcr_regs || (len != 2 * sizeof(u32))) { + printk(KERN_ERR "plb_dma channel%d: Can't get DCR register base !", + adev->id); + goto err; + } + adev->dcr_base = dcr_regs[0]; + + pool_size = of_get_property(np, "pool_size", NULL); + if(!pool_size) { + printk(KERN_ERR "plb_dma channel%d: Device node %s has missing or " + "invalid pool_size property\n", adev->id, np->full_name); + goto err; + } + adev->pool_size = *pool_size; + } else { + if (of_address_to_resource(np, 0, &res)) { + printk(KERN_ERR "adma_xor channel%d %s: could not get resource address.\n", + adev->id,np->full_name); + goto err; + } + + DBG("XOR resource start = %llx end = %llx\n", res.start, res.end); + adev->xor_base = ioremap(res.start, res.end - res.start + 1); + if (!adev->xor_base){ + printk(KERN_ERR "XOR engine registers memory mapping failed.\n"); + goto err; + } + adev->pool_size = PAGE_SIZE << 1; + } + + adev->pdma = pdma; + adev->ofdev = ofdev; + dev_set_drvdata(&(ofdev->dev),adev); + + switch (adev->id){ + case APM82181_PDMA0_ID: + case APM82181_PDMA1_ID: + case APM82181_PDMA2_ID: + case APM82181_PDMA3_ID: + dma_cap_set(DMA_MEMCPY,adev->cap_mask); + break; + case APM82181_XOR_ID: + dma_cap_set(DMA_XOR,adev->cap_mask); + dma_cap_set(DMA_INTERRUPT,adev->cap_mask); + break; + default: + BUG(); + } + /* XOR h/w configuration */ + if(adev->id == APM82181_XOR_ID) + xor_hw_init(adev); + /* allocate coherent memory for hardware descriptors + * note: writecombine gives slightly better performance, but + * requires that we explicitly drain the write buffer + */ + if ((adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev, + adev->pool_size, &adev->dma_desc_pool, GFP_KERNEL)) == NULL) { + ret = -ENOMEM; + goto err_dma_alloc; + } + + adev->common.cap_mask = adev->cap_mask; + INIT_LIST_HEAD(&adev->common.channels); + /* set base 
routines */ + adev->common.device_alloc_chan_resources = + apm82181_adma_alloc_chan_resources; + adev->common.device_free_chan_resources = + apm82181_adma_free_chan_resources; + adev->common.device_tx_status = apm82181_adma_tx_status; + adev->common.device_issue_pending = apm82181_adma_issue_pending; + adev->common.dev = &ofdev->dev; + + /* set prep routines based on capability */ + if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) { + adev->common.device_prep_dma_memcpy = + apm82181_adma_prep_dma_memcpy; + } + + if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) { + adev->common.device_prep_dma_interrupt = + apm82181_adma_prep_dma_interrupt; + } + + if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) { + adev->common.max_xor = XOR_MAX_OPS; + adev->common.device_prep_dma_xor = + apm82181_adma_prep_dma_xor; + } + + /* create a channel */ + if ((chan = kzalloc(sizeof(*chan), GFP_KERNEL)) == NULL) { + ret = -ENOMEM; + goto err_chan_alloc; + } + tasklet_init(&chan->irq_tasklet, apm82181_adma_tasklet, + (unsigned long)chan); + + irq = irq_of_parse_and_map(np, 0); + switch (adev->id){ + case 0: + if (irq >= 0) { + ret = request_irq(irq, apm82181_adma_eot_handler, + IRQF_DISABLED, "adma-chan0", chan); + if (ret) { + printk("Failed to request IRQ %d\n",irq); + ret = -EIO; + goto err_irq; + } + } + break; + case 1: + if (irq >= 0) { + ret = request_irq(irq, apm82181_adma_eot_handler, + IRQF_DISABLED, "adma-chan1", chan); + if (ret) { + printk("Failed to request IRQ %d\n",irq); + ret = -EIO; + goto err_irq; + } + } + break; + case 2: + if (irq >= 0) { + ret = request_irq(irq, apm82181_adma_eot_handler, + IRQF_DISABLED, "adma-chan2", chan); + if (ret) { + printk("Failed to request IRQ %d\n",irq); + ret = -EIO; + goto err_irq; + } + } + break; + case 3: + if (irq >= 0) { + ret = request_irq(irq, apm82181_adma_eot_handler, + IRQF_DISABLED, "adma-chan3", chan); + if (ret) { + printk("Failed to request IRQ %d\n",irq); + ret = -EIO; + goto err_irq; + } + } + break; + case 4: + if (irq >= 0) { + ret = request_irq(irq, apm82181_adma_eot_handler, + IRQF_DISABLED, "adma-xor", chan); + if (ret) { + printk("Failed to request IRQ %d\n",irq); + ret = -EIO; + goto err_irq; + } + } + break; + default: + BUG(); + } + + spin_lock_init(&chan->lock); + chan->device = adev; + INIT_LIST_HEAD(&chan->chain); + INIT_LIST_HEAD(&chan->all_slots); + chan->common.device = &adev->common; + list_add_tail(&chan->common.device_node, &adev->common.channels); + adev->common.chancnt++; + + printk( "AMCC(R) APM82181 ADMA Engine found [%d]: " + "( capabilities: %s%s%s%s%s%s)\n", + adev->id, + dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq_xor " : "", + dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : + "", + dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "", + dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : + "", + dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "", + dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? 
"int " : ""); + INFO; + ret = dma_async_device_register(&adev->common); + if (ret) { + dev_err(&ofdev->dev, "failed to register dma async device"); + goto err_irq; + } + INFO; + + goto out; +err_irq: + kfree(chan); +err_chan_alloc: + dma_free_coherent(&ofdev->dev, adev->pool_size, + adev->dma_desc_pool_virt, adev->dma_desc_pool); +err_dma_alloc: + if (adev->xor_base) + iounmap(adev->xor_base); +err: + kfree(adev); +out: + return ret; +} + +static struct of_device_id dma_4chan_match[] = +{ + { + .compatible = "amcc,apm82181-adma", + }, + {}, +}; + +static struct of_device_id dma_per_chan_match[] = { + {.compatible = "amcc,apm82181-dma-4channel",}, + {.compatible = "amcc,xor",}, + {}, +}; +/* + * apm82181_adma_probe - probe the asynch device + */ +static int apm82181_pdma_probe(struct platform_device *ofdev) +{ + int ret = 0; + apm82181_plb_dma_t *pdma; + + if ((pdma = kzalloc(sizeof(*pdma), GFP_KERNEL)) == NULL) { + ret = -ENOMEM; + goto out; + } + pdma->dev = &ofdev->dev; + pdma->ofdev = ofdev; + printk(PPC4XX_EDMA "Probing AMCC APM82181 ADMA engines...\n"); + + dev_set_drvdata(&(ofdev->dev),pdma); + of_platform_bus_probe(ofdev->dev.of_node, dma_per_chan_match,&ofdev->dev); + +out: + return ret; +} + +/* + * apm82181_test_xor - test are RAID-5 XOR capability enabled successfully. + * For this we just perform one DMA XOR operation with the 3 sources + * to a destination + */ +static int apm82181_test_xor (apm82181_ch_t *chan) +{ + apm82181_desc_t *sw_desc, *group_start; + struct page *pg_src[3], *pg_dest; + char *a; + dma_addr_t dma_src_addr[3]; + dma_addr_t dma_dst_addr; + int rval = -EFAULT, i; + int len = PAGE_SIZE, src_cnt = 3; + int slot_cnt, slots_per_op; + INFO; + printk("ADMA channel %d XOR testing\n",chan->device->id); + for(i = 0; i < 3; i++){ + pg_src[i] = alloc_page(GFP_KERNEL); + if (!pg_src[i]) + return -ENOMEM; + } + pg_dest = alloc_page(GFP_KERNEL); + if (!pg_dest) + return -ENOMEM; + /* Fill the test page with ones */ + memset(page_address(pg_src[0]), 0xDA, len); + memset(page_address(pg_src[1]), 0xDA, len); + memset(page_address(pg_src[2]), 0x00, len); + memset(page_address(pg_dest), 0xA5, len); + for(i = 0; i < 3; i++){ + a = page_address(pg_src[i]); + printk("The virtual addr of src %d =%x\n",i, (unsigned int)a); + MEM_HEXDUMP(a,50); + } + a = page_address(pg_dest); + printk("The virtual addr of dest=%x\n", (unsigned int)a); + MEM_HEXDUMP(a,50); + + for(i = 0; i < 3; i++){ + dma_src_addr[i] = dma_map_page(chan->device->dev, pg_src[i], 0, len, + DMA_BIDIRECTIONAL); + } + dma_dst_addr = dma_map_page(chan->device->dev, pg_dest, 0, len, + DMA_BIDIRECTIONAL); + printk("dma_src_addr[0]: %llx; dma_src_addr[1]: %llx;\n " + "dma_src_addr[2]: %llx; dma_dst_addr %llx, len: %x\n", dma_src_addr[0], + dma_src_addr[1], dma_src_addr[2], dma_dst_addr, len); + + spin_lock_bh(&chan->lock); + slot_cnt = apm82181_chan_xor_slot_count(len, src_cnt, &slots_per_op); + sw_desc = apm82181_adma_alloc_slots(chan, slot_cnt, slots_per_op); + if (sw_desc) { + group_start = sw_desc->group_head; + apm82181_desc_init_xor(group_start, src_cnt, DMA_PREP_INTERRUPT); + /* Setup addresses */ + while (src_cnt--) + apm82181_adma_set_src(group_start, + dma_src_addr[src_cnt], src_cnt); + apm82181_adma_set_dest(group_start, dma_dst_addr, 0); + apm82181_desc_set_byte_count(group_start, chan, len); + sw_desc->unmap_len = PAGE_SIZE; + } else { + rval = -EFAULT; + spin_unlock_bh(&chan->lock); + goto exit; + } + spin_unlock_bh(&chan->lock); + + printk("Submit CDB...\n"); + MEM_HEXDUMP(sw_desc->hw_desc, 96); + 
async_tx_ack(&sw_desc->async_tx); + sw_desc->async_tx.callback = apm82181_test_callback; + sw_desc->async_tx.callback_param = NULL; + + init_completion(&apm82181_r5_test_comp); + apm82181_adma_tx_submit(&sw_desc->async_tx); + apm82181_adma_issue_pending(&chan->common); + //wait_for_completion(&apm82181_r5_test_comp); + /* wait for a while so that the DMA transaction finishes */ + mdelay(100); + /* Now check if the destination page was zeroed */ + a = page_address(pg_dest); + /* XOR result at destination */ + MEM_HEXDUMP(a,50); + if ((*(u32*)a) == 0x00000000 && memcmp(a, a+4, PAGE_SIZE-4)==0) { + /* page dest XOR is correct as expected - RAID-5 enabled */ + rval = 0; + } else { + /* RAID-5 was not enabled */ + rval = -EINVAL; + } + +exit: + dma_unmap_page(chan->device->dev, dma_src_addr[0], PAGE_SIZE, DMA_BIDIRECTIONAL); + dma_unmap_page(chan->device->dev, dma_src_addr[1], PAGE_SIZE, DMA_BIDIRECTIONAL); + dma_unmap_page(chan->device->dev, dma_src_addr[2], PAGE_SIZE, DMA_BIDIRECTIONAL); + dma_unmap_page(chan->device->dev, dma_dst_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(pg_src[0]); + __free_page(pg_src[1]); + __free_page(pg_src[2]); + __free_page(pg_dest); + return rval; +} + + +/* + * apm82181_test_dma - test whether the DMA memcpy capability works. + * For this we just perform one memcpy operation from a source page + * to a destination page and compare the two buffers afterwards. + */ +static int apm82181_test_dma (apm82181_ch_t *chan) +{ + apm82181_desc_t *sw_desc; + struct page *pg_src, *pg_dest; + char *a, *d; + dma_addr_t dma_src_addr; + dma_addr_t dma_dst_addr; + int rval = -EFAULT; + int len = PAGE_SIZE; + + printk("PLB DMA channel %d memcpy testing\n",chan->device->id); + pg_src = alloc_page(GFP_KERNEL); + if (!pg_src) + return -ENOMEM; + pg_dest = alloc_page(GFP_KERNEL); + if (!pg_dest) + return -ENOMEM; + /* Fill the test pages with known patterns */ + memset(page_address(pg_src), 0x77, len); + memset(page_address(pg_dest), 0xa5, len); + a = page_address(pg_src); + printk("The virtual addr of src =%x\n", (unsigned int)a); + MEM_HEXDUMP(a,50); + a = page_address(pg_dest); + printk("The virtual addr of dest=%x\n", (unsigned int)a); + MEM_HEXDUMP(a,50); + dma_src_addr = dma_map_page(chan->device->dev, pg_src, 0, len, + DMA_BIDIRECTIONAL); + dma_dst_addr = dma_map_page(chan->device->dev, pg_dest, 0, len, + DMA_BIDIRECTIONAL); + printk("dma_src_addr: %llx; dma_dst_addr %llx\n", dma_src_addr, dma_dst_addr); + + spin_lock_bh(&chan->lock); + sw_desc = apm82181_adma_alloc_slots(chan, 1, 1); + if (sw_desc) { + /* 1 src, 1 dst, int_ena */ + apm82181_desc_init_memcpy(sw_desc, DMA_PREP_INTERRUPT); + //apm82181_desc_init_memcpy(sw_desc, 0); + /* Set up addresses */ + apm82181_adma_set_src(sw_desc, dma_src_addr, 0); + apm82181_adma_set_dest(sw_desc, dma_dst_addr, 0); + apm82181_desc_set_byte_count(sw_desc, chan, len); + sw_desc->unmap_len = PAGE_SIZE; + } else { + rval = -EFAULT; + spin_unlock_bh(&chan->lock); + goto exit; + } + spin_unlock_bh(&chan->lock); + + printk("Submit CDB...\n"); + MEM_HEXDUMP(sw_desc->hw_desc, 96); + async_tx_ack(&sw_desc->async_tx); + sw_desc->async_tx.callback = apm82181_test_callback; + sw_desc->async_tx.callback_param = NULL; + + init_completion(&apm82181_r5_test_comp); + apm82181_adma_tx_submit(&sw_desc->async_tx); + apm82181_adma_issue_pending(&chan->common); + //wait_for_completion(&apm82181_r5_test_comp); + + a = page_address(pg_src); + d =
page_address(pg_dest); + if (!memcmp(a, d, len)) { + rval = 0; + } else { + rval = -EINVAL; + } + + a = page_address(pg_src); + printk("\nAfter DMA done:"); + printk("\nsrc %x value:\n", (unsigned int)a); + MEM_HEXDUMP(a,96); + a = page_address(pg_dest); + printk("\ndest%x value:\n", (unsigned int)a); + MEM_HEXDUMP(a,96); + +exit: + __free_page(pg_src); + __free_page(pg_dest); + return rval; +} + +static struct platform_driver apm82181_pdma_driver = { + .driver = { + .name = "apm82181_plb_dma", + .owner = THIS_MODULE, + .of_match_table = dma_4chan_match, + }, + .probe = apm82181_pdma_probe, + //.remove = apm82181_pdma_remove, +}; +struct platform_driver apm82181_dma_per_chan_driver = { + .driver = { + .name = "apm82181-dma-4channel", + .owner = THIS_MODULE, + .of_match_table = dma_per_chan_match, + }, + .probe = apm82181_dma_per_chan_probe, +}; + +static int __init apm82181_adma_per_chan_init (void) +{ + int rval; + rval = platform_driver_register(&apm82181_dma_per_chan_driver); + return rval; +} + +static int __init apm82181_adma_init (void) +{ + int rval; + struct proc_dir_entry *p; + + rval = platform_driver_register(&apm82181_pdma_driver); + + return rval; +} + +#if 0 +static void __exit apm82181_adma_exit (void) +{ + of_unregister_platform_driver(&apm82181_pdma_driver); + return; +} +module_exit(apm82181_adma_exit); +#endif + +module_init(apm82181_adma_per_chan_init); +module_init(apm82181_adma_init); + +MODULE_AUTHOR("Tai Tri Nguyen<ttnguyen@appliedmicro.com>"); +MODULE_DESCRIPTION("APM82181 ADMA Engine Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/ppc4xx/ppc460ex_4chan_dma.c b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.c new file mode 100644 index 00000000000..821e279e0b7 --- /dev/null +++ b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.c @@ -0,0 +1,1110 @@ +/* + * Copyright(c) 2008 Applied Micro Circuits Corporation(AMCC). All rights reserved. + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. 
+ */ + + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/async_tx.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/uaccess.h> +#include <linux/proc_fs.h> +#include <linux/slab.h> +#include <asm/dcr-regs.h> +#include <asm/dcr.h> +#include "ppc460ex_4chan_dma.h" + + + +#ifdef DEBUG_TEST +#define dma_pr printk +#else +#define dma_pr +#endif +#define TEST_SIZE 12 + + +ppc460ex_plb_dma_dev_t *adev; + + + +int ppc460ex_get_dma_channel(void) +{ + int i; + unsigned int status = 0; + status = mfdcr(DCR_DMA2P40_SR); + + for(i=0; i<MAX_PPC460EX_DMA_CHANNELS; i++) { + if ((status & (1 >> (20+i))) == 0) + return i; + } + return -ENODEV; +} + + +int ppc460ex_get_dma_status(void) +{ + return (mfdcr(DCR_DMA2P40_SR)); + +} + + +int ppc460ex_set_src_addr(int ch_id, phys_addr_t src_addr) +{ + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk("%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + + +#ifdef PPC4xx_DMA_64BIT + mtdcr(DCR_DMA2P40_SAH0 + ch_id*8, src_addr >> 32); +#endif + mtdcr(DCR_DMA2P40_SAL0 + ch_id*8, (u32)src_addr); + + return DMA_STATUS_GOOD; +} + +int ppc460ex_set_dst_addr(int ch_id, phys_addr_t dst_addr) +{ + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + +#ifdef PPC4xx_DMA_64BIT + mtdcr(DCR_DMA2P40_DAH0 + ch_id*8, dst_addr >> 32); +#endif + mtdcr(DCR_DMA2P40_DAL0 + ch_id*8, (u32)dst_addr); + + return DMA_STATUS_GOOD; +} + + + +/* + * Sets the dma mode for single DMA transfers only. + * For scatter/gather transfers, the mode is passed to the + * alloc_dma_handle() function as one of the parameters. + * + * The mode is simply saved and used later. This allows + * the driver to call set_dma_mode() and set_dma_addr() in + * any order. + * + * Valid mode values are: + * + * DMA_MODE_READ peripheral to memory + * DMA_MODE_WRITE memory to peripheral + * DMA_MODE_MM memory to memory + * DMA_MODE_MM_DEVATSRC device-paced memory to memory, device at src + * DMA_MODE_MM_DEVATDST device-paced memory to memory, device at dst + */ +int ppc460ex_set_dma_mode(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int mode) +{ + + ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id]; + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk("%s: bad channel %d\n", __FUNCTION__, dma_chan->chan_id); + return DMA_STATUS_BAD_CHANNEL; + } + + dma_chan->mode = mode; + return DMA_STATUS_GOOD; +} + + + + +/* + * Sets the DMA Count register. Note that 'count' is in bytes. + * However, the DMA Count register counts the number of "transfers", + * where each transfer is equal to the bus width. Thus, count + * MUST be a multiple of the bus width. 
+ */ +void ppc460ex_set_dma_count(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int count) +{ + ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id]; + +//#ifdef DEBUG_4xxDMA + + { + int error = 0; + switch (dma_chan->pwidth) { + case PW_8: + break; + case PW_16: + if (count & 0x1) + error = 1; + break; + case PW_32: + if (count & 0x3) + error = 1; + break; + case PW_64: + if (count & 0x7) + error = 1; + break; + + case PW_128: + if (count & 0xf) + error = 1; + break; + default: + printk("set_dma_count: invalid bus width: 0x%x\n", + dma_chan->pwidth); + return; + } + if (error) + printk + ("Warning: set_dma_count count 0x%x bus width %d\n", + count, dma_chan->pwidth); + } +//#endif + count = count >> dma_chan->shift; + //count = 10; + mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), count); + +} + + + + +/* + * Enables the channel interrupt. + * + * If performing a scatter/gatter transfer, this function + * MUST be called before calling alloc_dma_handle() and building + * the sgl list. Otherwise, interrupts will not be enabled, if + * they were previously disabled. + */ +int ppc460ex_enable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id) +{ + unsigned int control; + ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id]; + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + + dma_chan->int_enable = 1; + + + control = mfdcr(DCR_DMA2P40_CR0); + control |= DMA_CIE_ENABLE; /* Channel Interrupt Enable */ + mtdcr(DCR_DMA2P40_CR0, control); + + + +#if 1 + control = mfdcr(DCR_DMA2P40_CTC0); + control |= DMA_CTC_TCIE | DMA_CTC_ETIE| DMA_CTC_EIE; + mtdcr(DCR_DMA2P40_CTC0, control); + +#endif + + + return DMA_STATUS_GOOD; + +} + + +/* + * Disables the channel interrupt. + * + * If performing a scatter/gatter transfer, this function + * MUST be called before calling alloc_dma_handle() and building + * the sgl list. Otherwise, interrupts will not be disabled, if + * they were previously enabled. + */ +int ppc460ex_disable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id) +{ + unsigned int control; + ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id]; + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + dma_chan->int_enable = 0; + control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8)); + control &= ~DMA_CIE_ENABLE; /* Channel Interrupt Enable */ + mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control); + + return DMA_STATUS_GOOD; +} + + +/* + * This function returns the channel configuration. 
+ */ +int ppc460ex_get_channel_config(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, + ppc460ex_plb_dma_ch_t *p_dma_ch) +{ + unsigned int polarity; + unsigned int control; + ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id]; + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + + memcpy(p_dma_ch, dma_chan, sizeof(ppc460ex_plb_dma_ch_t)); + + polarity = mfdcr(DCR_DMA2P40_POL); + + p_dma_ch->polarity = polarity & GET_DMA_POLARITY(ch_id); + control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8)); + + p_dma_ch->cp = GET_DMA_PRIORITY(control); + p_dma_ch->pwidth = GET_DMA_PW(control); + p_dma_ch->psc = GET_DMA_PSC(control); + p_dma_ch->pwc = GET_DMA_PWC(control); + p_dma_ch->phc = GET_DMA_PHC(control); + p_dma_ch->ce = GET_DMA_CE_ENABLE(control); + p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control); + p_dma_ch->shift = GET_DMA_PW(control); + p_dma_ch->pf = GET_DMA_PREFETCH(control); + + return DMA_STATUS_GOOD; + +} + +/* + * Sets the priority for the DMA channel dmanr. + * Since this is setup by the hardware init function, this function + * can be used to dynamically change the priority of a channel. + * + * Acceptable priorities: + * + * PRIORITY_LOW + * PRIORITY_MID_LOW + * PRIORITY_MID_HIGH + * PRIORITY_HIGH + * + */ +int ppc460ex_set_channel_priority(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, + unsigned int priority) +{ + unsigned int control; + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + + if ((priority != PRIORITY_LOW) && + (priority != PRIORITY_MID_LOW) && + (priority != PRIORITY_MID_HIGH) && (priority != PRIORITY_HIGH)) { + printk("%s:bad priority: 0x%x\n", __FUNCTION__, priority); + } + + control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8)); + control |= SET_DMA_PRIORITY(priority); + mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control); + + return DMA_STATUS_GOOD; +} + +/* + * Returns the width of the peripheral attached to this channel. This assumes + * that someone who knows the hardware configuration, boot code or some other + * init code, already set the width. + * + * The return value is one of: + * PW_8 + * PW_16 + * PW_32 + * PW_64 + * + * The function returns 0 on error. + */ +unsigned int ppc460ex_get_peripheral_width(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id) +{ + unsigned int control; + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8)); + return (GET_DMA_PW(control)); +} + +/* + * Enables the burst on the channel (BTEN bit in the control/count register) + * Note: + * For scatter/gather dma, this function MUST be called before the + * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the + * sgl list and used as each sgl element is added. 
+ */ +int ppc460ex_enable_burst(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id) +{ + unsigned int ctc; + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + + ctc = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8)) | DMA_CTC_BTEN; + mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), ctc); + return DMA_STATUS_GOOD; +} + + +/* + * Disables the burst on the channel (BTEN bit in the control/count register) + * Note: + * For scatter/gather dma, this function MUST be called before the + * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the + * sgl list and used as each sgl element is added. + */ +int ppc460ex_disable_burst(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id) +{ + unsigned int ctc; + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + + ctc = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8)) &~ DMA_CTC_BTEN; + mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), ctc); + return DMA_STATUS_GOOD; +} + + +/* + * Sets the burst size (number of peripheral widths) for the channel + * (BSIZ bits in the control/count register)) + * must be one of: + * DMA_CTC_BSIZ_2 + * DMA_CTC_BSIZ_4 + * DMA_CTC_BSIZ_8 + * DMA_CTC_BSIZ_16 + * Note: + * For scatter/gather dma, this function MUST be called before the + * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the + * sgl list and used as each sgl element is added. + */ +int ppc460ex_set_burst_size(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, + unsigned int bsize) +{ + unsigned int ctc; + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + + ctc = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8)) &~ DMA_CTC_BSIZ_MSK; + ctc |= (bsize & DMA_CTC_BSIZ_MSK); + mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), ctc); + return DMA_STATUS_GOOD; +} + +/* + * Returns the number of bytes left to be transferred. + * After a DMA transfer, this should return zero. + * Reading this while a DMA transfer is still in progress will return + * unpredictable results. + */ +int ppc460ex_get_dma_residue(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id) +{ + unsigned int count; + ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id]; + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + + count = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8)); + count &= DMA_CTC_TC_MASK ; + + return (count << dma_chan->shift); + +} + + +/* + * Configures a DMA channel, including the peripheral bus width, if a + * peripheral is attached to the channel, the polarity of the DMAReq and + * DMAAck signals, etc. This information should really be setup by the boot + * code, since most likely the configuration won't change dynamically. + * If the kernel has to call this function, it's recommended that it's + * called from platform specific init code. The driver should not need to + * call this function. 
+ */ +int ppc460ex_init_dma_channel(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, + ppc460ex_plb_dma_ch_t *p_init) +{ + unsigned int polarity; + uint32_t control = 0; + ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id]; + + + DMA_MODE_READ = (unsigned long) DMA_TD; /* Peripheral to Memory */ + DMA_MODE_WRITE = 0; /* Memory to Peripheral */ + + if (!p_init) { + printk("%s: NULL p_init\n", __FUNCTION__); + return DMA_STATUS_NULL_POINTER; + } + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } +#if DCR_DMA2P40_POL > 0 + polarity = mfdcr(DCR_DMA2P40_POL); +#else + polarity = 0; +#endif + + p_init->int_enable = 0; + p_init->buffer_enable = 1; + p_init->etd_output = 1; + p_init->tce_enable = 1; + p_init->pl = 0; + p_init->dai = 1; + p_init->sai = 1; + /* Duc Dang: make channel priority to 2, original is 3 */ + p_init->cp = 2; + p_init->pwidth = PW_8; + p_init->psc = 0; + p_init->pwc = 0; + p_init->phc = 0; + p_init->pf = 1; + + + /* Setup the control register based on the values passed to + * us in p_init. Then, over-write the control register with this + * new value. + */ +#if 0 + control |= SET_DMA_CONTROL; +#endif + control = SET_DMA_CONTROL; + /* clear all polarity signals and then "or" in new signal levels */ + +//PMB - Workaround + //control = 0x81A2CD80; + //control = 0x81A00180; + + + polarity &= ~GET_DMA_POLARITY(ch_id); + polarity |= p_init->polarity; + +#if DCR_DMA2P40_POL > 0 + mtdcr(DCR_DMA2P40_POL, polarity); +#endif + mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control); + + /* save these values in our dma channel structure */ + //memcpy(dma_chan, p_init, sizeof(ppc460ex_plb_dma_ch_t)); + /* + * The peripheral width values written in the control register are: + * PW_8 0 + * PW_16 1 + * PW_32 2 + * PW_64 3 + * PW_128 4 + * + * Since the DMA count register takes the number of "transfers", + * we need to divide the count sent to us in certain + * functions by the appropriate number. It so happens that our + * right shift value is equal to the peripheral width value. + */ + dma_chan->shift = p_init->pwidth; + dma_chan->sai = p_init->sai; + dma_chan->dai = p_init->dai; + dma_chan->tce_enable = p_init->tce_enable; + dma_chan->mode = DMA_MODE_MM; + /* + * Save the control word for easy access. 
+ */ + dma_chan->control = control; + mtdcr(DCR_DMA2P40_SR, 0xffffffff); + + + return DMA_STATUS_GOOD; +} + + +int ppc460ex_enable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id) +{ + unsigned int control; + ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id]; + unsigned int status_bits[] = { DMA_CS0 | DMA_TS0 | DMA_CH0_ERR, + DMA_CS1 | DMA_TS1 | DMA_CH1_ERR}; + + if (dma_chan->in_use) { + printk("%s:enable_dma: channel %d in use\n", __FUNCTION__, ch_id); + return DMA_STATUS_CHANNEL_NOTFREE; + } + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + +#if 0 + if (dma_chan->mode == DMA_MODE_READ) { + /* peripheral to memory */ + ppc460ex_set_src_addr(ch_id, 0); + ppc460ex_set_dst_addr(ch_id, dma_chan->addr); + } else if (dma_chan->mode == DMA_MODE_WRITE) { + /* memory to peripheral */ + ppc460ex_set_src_addr(ch_id, dma_chan->addr); + ppc460ex_set_dst_addr(ch_id, 0); + } +#endif + /* for other xfer modes, the addresses are already set */ + control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8)); + control &= ~(DMA_TM_MASK | DMA_TD); /* clear all mode bits */ + if (dma_chan->mode == DMA_MODE_MM) { + /* software initiated memory to memory */ + control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE; + control |= DMA_MODE_MM; + if (dma_chan->dai) { + control |= DMA_DAI; + } + if (dma_chan->sai) { + control |= DMA_SAI; + } + } + + mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control); + /* + * Clear the CS, TS, RI bits for the channel from DMASR. This + * has been observed to happen correctly only after the mode and + * ETD/DCE bits in DMACRx are set above. Must do this before + * enabling the channel. + */ + mtdcr(DCR_DMA2P40_SR, status_bits[ch_id]); + /* + * For device-paced transfers, Terminal Count Enable apparently + * must be on, and this must be turned on after the mode, etc. + * bits are cleared above (at least on Redwood-6). + */ + + if ((dma_chan->mode == DMA_MODE_MM_DEVATDST) || + (dma_chan->mode == DMA_MODE_MM_DEVATSRC)) + control |= DMA_TCE_ENABLE; + + /* + * Now enable the channel. 
+ */ + + control |= (dma_chan->mode | DMA_CE_ENABLE); + control |= DMA_BEN; + //control = 0xc4effec0; + + mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control); + dma_chan->in_use = 1; + return 0; + +} + + +void +ppc460ex_disable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id) +{ + unsigned int control; + ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id]; + + if (!dma_chan->in_use) { + printk("disable_dma: channel %d not in use\n", ch_id); + return; + } + + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk("disable_dma: bad channel: %d\n", ch_id); + return; + } + + control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8)); + control &= ~DMA_CE_ENABLE; + mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control); + + dma_chan->in_use = 0; +} + + + + +/* + * Clears the channel status bits + */ +int ppc460ex_clear_dma_status(unsigned int ch_id) +{ + if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) { + printk("KERN_ERR %s: bad channel %d\n", __FUNCTION__, ch_id); + return DMA_STATUS_BAD_CHANNEL; + } + + mtdcr(DCR_DMA2P40_SR, ((u32)DMA_CH0_ERR | (u32)DMA_CS0 | (u32)DMA_TS0) >> ch_id); + return DMA_STATUS_GOOD; + +} + + +/** + * ppc460ex_dma_eot_handler - end of transfer interrupt handler + */ +irqreturn_t ppc460ex_4chan_dma_eot_handler(int irq, void *data) +{ + unsigned int data_read = 0; + unsigned int try_cnt = 0; + + //printk("transfer complete\n"); + data_read = mfdcr(DCR_DMA2P40_SR); + //printk("%s: status 0x%08x\n", __FUNCTION__, data_read); + + do{ + //while bit 3 TC done is 0 + data_read = mfdcr(DCR_DMA2P40_SR); + if (data_read & 0x00800000 ) {printk("test FAIL\n"); } //see if error bit is set + }while(((data_read & 0x80000000) != 0x80000000) && ++try_cnt <= 10);// TC is now 0 + + data_read = mfdcr(DCR_DMA2P40_SR); + while (data_read & 0x00000800){ //while channel is busy + data_read = mfdcr(DCR_DMA2P40_SR); + printk("%s: status for busy 0x%08x\n", __FUNCTION__, data_read); + } + mtdcr(DCR_DMA2P40_SR, 0xffffffff); + + + + return IRQ_HANDLED; +} + + + +static struct of_device_id dma_per_chan_match[] = { + { + .compatible = "amcc,dma-4channel", + }, + {}, +}; + + + + +#if 0 +/*** test code ***/ +static int ppc460ex_dma_memcpy_self_test(ppc460ex_plb_dma_dev_t *device, unsigned int dma_ch_id) +{ + ppc460ex_plb_dma_ch_t p_init; + int res = 0, i; + unsigned int control; + phys_addr_t *src; + phys_addr_t *dest; + + phys_addr_t *gap; + + phys_addr_t dma_dest, dma_src; + + src = kzalloc(TEST_SIZE, GFP_KERNEL); + if (!src) + return -ENOMEM; + gap = kzalloc(200, GFP_KERNEL); + if (!gap) + return -ENOMEM; + + + + dest = kzalloc(TEST_SIZE, GFP_KERNEL); + if (!dest) { + kfree(src); + return -ENOMEM; + } + + printk("src = 0x%08x\n", (unsigned int)src); + printk("gap = 0x%08x\n", (unsigned int)gap); + printk("dest = 0x%08x\n", (unsigned int)dest); + + /* Fill in src buffer */ + for (i = 0; i < TEST_SIZE; i++) + ((u8*)src)[i] = (u8)i; + + printk("dump src\n"); + DMA_HEXDUMP(src, TEST_SIZE); + DMA_HEXDUMP(dest, TEST_SIZE); +#if 1 + dma_src = dma_map_single(p_init.device->dev, src, TEST_SIZE, + DMA_TO_DEVICE); + dma_dest = dma_map_single(p_init.device->dev, dest, TEST_SIZE, + DMA_FROM_DEVICE); +#endif + printk("%s:channel = %d chan 0x%08x\n", __FUNCTION__, device->chan[dma_ch_id]->chan_id, + (unsigned int)(device->chan)); + + p_init.polarity = 0; + p_init.pwidth = PW_32; + p_init.in_use = 0; + p_init.sai = 1; + p_init.dai = 1; + res = ppc460ex_init_dma_channel(device, dma_ch_id, &p_init); + + if (res) { + printk("%32s: init_dma_channel return %d\n", + __FUNCTION__, res); + } + ppc460ex_clear_dma_status(dma_ch_id); + + 
ppc460ex_set_src_addr(dma_ch_id, dma_src); + ppc460ex_set_dst_addr(dma_ch_id, dma_dest); + + ppc460ex_set_dma_mode(device, dma_ch_id, DMA_MODE_MM); + ppc460ex_set_dma_count(device, dma_ch_id, TEST_SIZE); + + res = ppc460ex_enable_dma_interrupt(device, dma_ch_id); + if (res) { + printk("%32s: en/disable_dma_interrupt\n", + __FUNCTION__); + } + + + if (dma_ch_id == 0) + control = mfdcr(DCR_DMA2P40_CR0); + else if (dma_ch_id == 1) + control = mfdcr(DCR_DMA2P40_CR1); + + + control &= ~(SET_DMA_BEN(1)); + control &= ~(SET_DMA_PSC(3)); + control &= ~(SET_DMA_PWC(0x3f)); + control &= ~(SET_DMA_PHC(0x7)); + control &= ~(SET_DMA_PL(1)); + + + + if (dma_ch_id == 0) + mtdcr(DCR_DMA2P40_CR0, control); + else if (dma_ch_id == 1) + mtdcr(DCR_DMA2P40_CR1, control); + + + ppc460ex_enable_dma(device, dma_ch_id); + + + if (memcmp(src, dest, TEST_SIZE)) { + printk("Self-test copy failed compare, disabling\n"); + res = -ENODEV; + goto out; + } + + + return 0; + + out: kfree(src); + kfree(dest); + return res; + +} + + + +static int test1(void) +{ + void *src, *dest; + void *src1, *dest1; + int i; + unsigned int chan; + + src = kzalloc(TEST_SIZE, GFP_KERNEL); + if (!src) + return -ENOMEM; + + dest = kzalloc(TEST_SIZE, GFP_KERNEL); + if (!dest) { + kfree(src); + return -ENOMEM; + } + + src1 = kzalloc(TEST_SIZE, GFP_KERNEL); + if (!src1) + return -ENOMEM; + + dest1 = kzalloc(TEST_SIZE, GFP_KERNEL); + if (!dest1) { + kfree(src1); + return -ENOMEM; + } + + /* Fill in src buffer */ + for (i = 0; i < TEST_SIZE; i++) + ((u8*)src)[i] = (u8)i; + + /* Fill in src buffer */ + for (i = 0; i < TEST_SIZE; i++) + ((u8*)src1)[i] = (u8)0xaa; + +#ifdef DEBUG_TEST + DMA_HEXDUMP(src, TEST_SIZE); + DMA_HEXDUMP(dest, TEST_SIZE); + DMA_HEXDUMP(src1, TEST_SIZE); + DMA_HEXDUMP(dest1, TEST_SIZE); +#endif + chan = ppc460ex_get_dma_channel(); + +#ifdef ENABLE_SGL + test_sgdma_memcpy(src, dest, src1, dest1, TEST_SIZE, chan); +#endif + test_dma_memcpy(src, dest, TEST_SIZE, chan); + + + out: kfree(src); + kfree(dest); + kfree(src1); + kfree(dest1); + + return 0; + +} +#endif + + + +/******************************************************************************* + * Module Initialization Routine + ******************************************************************************* + */ +int ppc460ex_dma_per_chan_probe(struct platform_device *ofdev) +{ + int ret=0; + //ppc460ex_plb_dma_dev_t *adev; + ppc460ex_plb_dma_ch_t *new_chan; + int err; + + + + adev = dev_get_drvdata(ofdev->dev.parent); + BUG_ON(!adev); + /* create a device */ + if ((new_chan = kzalloc(sizeof(*new_chan), GFP_KERNEL)) == NULL) { + printk("ERROR:No Free memory for allocating dma channels\n"); + ret = -ENOMEM; + goto err; + } + + err = of_address_to_resource(ofdev->dev.of_node,0,&new_chan->reg); + if (err) { + printk("ERROR:Can't get %s property reg\n", __FUNCTION__); + goto err; + } + new_chan->device = adev; + new_chan->reg_base = ioremap(new_chan->reg.start,new_chan->reg.end - new_chan->reg.start + 1); +#if 1 + printk("PPC460ex PLB DMA engine @0x%02X_%08X size %d\n", + (u32)(new_chan->reg.start >> 32), + (u32)new_chan->reg.start, + (u32)(new_chan->reg.end - new_chan->reg.start + 1)); +#endif + + switch(new_chan->reg.start) { + case 0x100: + new_chan->chan_id = 0; + break; + case 0x108: + new_chan->chan_id = 1; + break; + case 0x110: + new_chan->chan_id = 2; + break; + case 0x118: + new_chan->chan_id = 3; + break; + } + new_chan->chan_id = ((new_chan->reg.start - 0x100)& 0xfff) >> 3; + printk("new_chan->chan_id 0x%x\n",new_chan->chan_id); + adev->chan[new_chan->chan_id] = 
new_chan; + printk("new_chan->chan->chan_id 0x%x\n",adev->chan[new_chan->chan_id]->chan_id); + //adev->chan[new_chan->chan_id]->reg_base = new_chan->reg_base; + + return 0; + + err: + return ret; + +} + +int ppc460ex_dma_4chan_probe(struct platform_device *ofdev) +{ + int ret=0, irq = 0; + //ppc460ex_plb_dma_dev_t *adev; + ppc460ex_plb_dma_ch_t *chan = NULL; + struct device_node *np = ofdev->dev.of_node; + + /* create a device */ + if ((adev = kzalloc(sizeof(*adev), GFP_KERNEL)) == NULL) { + ret = -ENOMEM; + goto err_adev_alloc; + } + adev->dev = &ofdev->dev; +#if !defined(CONFIG_APM821xx) + err = of_address_to_resource(np,0,&adev->reg); + if(err) { + printk(KERN_ERR"Can't get %s property 'reg'\n",ofdev->node->full_name); + } +#endif + printk(KERN_INFO"Probing AMCC DMA driver\n"); +#if !defined(CONFIG_APM821xx) + adev->reg_base = ioremap(adev->reg.start, adev->reg.end - adev->reg.start + 1); +#endif + +#if 1 + irq = of_irq_to_resource(np, 0, NULL); + if (irq >= 0) { + ret = request_irq(irq, ppc460ex_4chan_dma_eot_handler, + IRQF_DISABLED, "Peripheral DMA0-1", chan); + if (ret) { + ret = -EIO; + goto err_irq; + } + //irq = platform_get_irq(adev, 0); + /* only DMA engines have a separate err IRQ + * so it's Ok if irq < 0 in XOR case + */ + } else + ret = -ENXIO; + +#if !defined(CONFIG_APM821xx) + printk("PPC4xx PLB DMA engine @0x%02X_%08X size %d IRQ %d \n", + (u32)(adev->reg.start >> 32), + (u32)adev->reg.start, + (u32)(adev->reg.end - adev->reg.start + 1), + irq); +#else + printk("PPC4xx PLB DMA engine IRQ %d\n", irq); +#endif +#endif + dev_set_drvdata(&(ofdev->dev),adev); + of_platform_bus_probe(np,dma_per_chan_match,&ofdev->dev); + + + //ppc460ex_dma_memcpy_self_test(adev, 0); + //test1(); + + + return 0; + + +err_adev_alloc: + //release_mem_region(adev->reg.start, adev->reg.end - adev->reg.start); +err_irq: + kfree(chan); + + return ret; +} + + +static struct of_device_id dma_4chan_match[] = { + { + .compatible = "amcc,dma", + }, + {}, +}; + +struct platform_driver ppc460ex_dma_4chan_driver = { + .driver = { + .name = "plb_dma", + .owner = THIS_MODULE, + .of_match_table = dma_4chan_match, + }, + .probe = ppc460ex_dma_4chan_probe, +}; + +struct platform_driver ppc460ex_dma_per_chan_driver = { + .driver = { + .name = "dma-4channel", + .owner = THIS_MODULE, + .of_match_table = dma_per_chan_match, + }, + .probe = ppc460ex_dma_per_chan_probe, +}; + + +static int __init mod_init (void) +{ + printk("%s:%d\n", __FUNCTION__, __LINE__); + return platform_driver_register(&ppc460ex_dma_4chan_driver); + printk("here 2\n"); +} + +static void __exit mod_exit(void) +{ + platform_driver_unregister(&ppc460ex_dma_4chan_driver); +} + +static int __init ppc460ex_dma_per_chan_init (void) +{ + printk("%s:%d\n", __FUNCTION__, __LINE__); + return platform_driver_register(&ppc460ex_dma_per_chan_driver); + printk("here 3\n"); +} + +static void __exit ppc460ex_dma_per_chan_exit(void) +{ + platform_driver_unregister(&ppc460ex_dma_per_chan_driver); +} + +subsys_initcall(ppc460ex_dma_per_chan_init); +subsys_initcall(mod_init); + +//module_exit(mod_exit); + +//module_exit(ppc460ex_dma_per_chan_exit); + +MODULE_DESCRIPTION("AMCC PPC460EX 4 channel Engine Driver"); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL_GPL(ppc460ex_get_dma_status); +EXPORT_SYMBOL_GPL(ppc460ex_set_src_addr); +EXPORT_SYMBOL_GPL(ppc460ex_set_dst_addr); +EXPORT_SYMBOL_GPL(ppc460ex_set_dma_mode); +EXPORT_SYMBOL_GPL(ppc460ex_set_dma_count); +EXPORT_SYMBOL_GPL(ppc460ex_enable_dma_interrupt); +EXPORT_SYMBOL_GPL(ppc460ex_init_dma_channel); 
+EXPORT_SYMBOL_GPL(ppc460ex_enable_dma); +EXPORT_SYMBOL_GPL(ppc460ex_disable_dma); +EXPORT_SYMBOL_GPL(ppc460ex_clear_dma_status); +EXPORT_SYMBOL_GPL(ppc460ex_get_dma_residue); +EXPORT_SYMBOL_GPL(ppc460ex_disable_dma_interrupt); +EXPORT_SYMBOL_GPL(ppc460ex_get_channel_config); +EXPORT_SYMBOL_GPL(ppc460ex_set_channel_priority); +EXPORT_SYMBOL_GPL(ppc460ex_get_peripheral_width); +EXPORT_SYMBOL_GPL(ppc460ex_enable_burst); +EXPORT_SYMBOL_GPL(ppc460ex_disable_burst); +EXPORT_SYMBOL_GPL(ppc460ex_set_burst_size); + +/************************************************************************/ diff --git a/drivers/dma/ppc4xx/ppc460ex_4chan_dma.h b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.h new file mode 100644 index 00000000000..c9448f34de4 --- /dev/null +++ b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.h @@ -0,0 +1,531 @@ + + +#include <linux/types.h> + + + + +#define DMA_HEXDUMP(b, l) \ + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \ + 16, 1, (b), (l), false); + + +#define MAX_PPC460EX_DMA_CHANNELS 4 + + +#define DCR_DMA0_BASE 0x200 +#define DCR_DMA1_BASE 0x208 +#define DCR_DMA2_BASE 0x210 +#define DCR_DMA3_BASE 0x218 +#define DCR_DMASR_BASE 0x220 + + + + + + +/* DMA Registers */ +#define DCR_DMA2P40_CR0 (DCR_DMA0_BASE + 0x0) /* DMA Channel Control 0 */ +#define DCR_DMA2P40_CTC0 (DCR_DMA0_BASE + 0x1) /* DMA Count 0 */ +#define DCR_DMA2P40_SAH0 (DCR_DMA0_BASE + 0x2) /* DMA Src Addr High 0 */ +#define DCR_DMA2P40_SAL0 (DCR_DMA0_BASE + 0x3) /* DMA Src Addr Low 0 */ +#define DCR_DMA2P40_DAH0 (DCR_DMA0_BASE + 0x4) /* DMA Dest Addr High 0 */ +#define DCR_DMA2P40_DAL0 (DCR_DMA0_BASE + 0x5) /* DMA Dest Addr Low 0 */ +#define DCR_DMA2P40_SGH0 (DCR_DMA0_BASE + 0x6) /* DMA SG Desc Addr High 0 */ +#define DCR_DMA2P40_SGL0 (DCR_DMA0_BASE + 0x7) /* DMA SG Desc Addr Low 0 */ + +#define DCR_DMA2P40_CR1 (DCR_DMA1_BASE + 0x0) /* DMA Channel Control 1 */ +#define DCR_DMA2P40_CTC1 (DCR_DMA1_BASE + 0x1) /* DMA Count 1 */ +#define DCR_DMA2P40_SAH1 (DCR_DMA1_BASE + 0x2) /* DMA Src Addr High 1 */ +#define DCR_DMA2P40_SAL1 (DCR_DMA1_BASE + 0x3) /* DMA Src Addr Low 1 */ +#define DCR_DMA2P40_DAH1 (DCR_DMA1_BASE + 0x4) /* DMA Dest Addr High 1 */ +#define DCR_DMA2P40_DAL1 (DCR_DMA1_BASE + 0x5) /* DMA Dest Addr Low 1 */ +#define DCR_DMA2P40_SGH1 (DCR_DMA1_BASE + 0x6) /* DMA SG Desc Addr High 1 */ +#define DCR_DMA2P40_SGL1 (DCR_DMA1_BASE + 0x7) /* DMA SG Desc Addr Low 1 */ + +#define DCR_DMA2P40_CR2 (DCR_DMA2_BASE + 0x0) /* DMA Channel Control 2 */ +#define DCR_DMA2P40_CTC2 (DCR_DMA2_BASE + 0x1) /* DMA Count 2 */ +#define DCR_DMA2P40_SAH2 (DCR_DMA2_BASE + 0x2) /* DMA Src Addr High 2 */ +#define DCR_DMA2P40_SAL2 (DCR_DMA2_BASE + 0x3) /* DMA Src Addr Low 2 */ +#define DCR_DMA2P40_DAH2 (DCR_DMA2_BASE + 0x4) /* DMA Dest Addr High 2 */ +#define DCR_DMA2P40_DAL2 (DCR_DMA2_BASE + 0x5) /* DMA Dest Addr Low 2 */ +#define DCR_DMA2P40_SGH2 (DCR_DMA2_BASE + 0x6) /* DMA SG Desc Addr High 2 */ +#define DCR_DMA2P40_SGL2 (DCR_DMA2_BASE + 0x7) /* DMA SG Desc Addr Low 2 */ + +#define DCR_DMA2P40_CR3 (DCR_DMA3_BASE + 0x0) /* DMA Channel Control 3 */ +#define DCR_DMA2P40_CTC3 (DCR_DMA3_BASE + 0x1) /* DMA Count 3 */ +#define DCR_DMA2P40_SAH3 (DCR_DMA3_BASE + 0x2) /* DMA Src Addr High 3 */ +#define DCR_DMA2P40_SAL3 (DCR_DMA3_BASE + 0x3) /* DMA Src Addr Low 3 */ +#define DCR_DMA2P40_DAH3 (DCR_DMA3_BASE + 0x4) /* DMA Dest Addr High 3 */ +#define DCR_DMA2P40_DAL3 (DCR_DMA3_BASE + 0x5) /* DMA Dest Addr Low 3 */ +#define DCR_DMA2P40_SGH3 (DCR_DMA3_BASE + 0x6) /* DMA SG Desc Addr High 3 */ +#define DCR_DMA2P40_SGL3 (DCR_DMA3_BASE + 0x7) /* DMA SG Desc Addr Low 
3 */ + +#define DCR_DMA2P40_SR (DCR_DMASR_BASE + 0x0) /* DMA Status Register */ +#define DCR_DMA2P40_SGC (DCR_DMASR_BASE + 0x3) /* DMA Scatter/Gather Command */ +#define DCR_DMA2P40_SLP (DCR_DMASR_BASE + 0x5) /* DMA Sleep Register */ +#define DCR_DMA2P40_POL (DCR_DMASR_BASE + 0x6) /* DMA Polarity Register */ + + + +/* + * Function return status codes + * These values are used to indicate whether or not the function + * call was successful, or a bad/invalid parameter was passed. + */ +#define DMA_STATUS_GOOD 0 +#define DMA_STATUS_BAD_CHANNEL 1 +#define DMA_STATUS_BAD_HANDLE 2 +#define DMA_STATUS_BAD_MODE 3 +#define DMA_STATUS_NULL_POINTER 4 +#define DMA_STATUS_OUT_OF_MEMORY 5 +#define DMA_STATUS_SGL_LIST_EMPTY 6 +#define DMA_STATUS_GENERAL_ERROR 7 +#define DMA_STATUS_CHANNEL_NOTFREE 8 + +#define DMA_CHANNEL_BUSY 0x80000000 + +/* + * These indicate status as returned from the DMA Status Register. + */ +#define DMA_STATUS_NO_ERROR 0 +#define DMA_STATUS_CS 1 /* Count Status */ +#define DMA_STATUS_TS 2 /* Transfer Status */ +#define DMA_STATUS_DMA_ERROR 3 /* DMA Error Occurred */ +#define DMA_STATUS_DMA_BUSY 4 /* The channel is busy */ + +/* + * DMA Channel Control Registers + */ +#ifdef CONFIG_44x +#define PPC4xx_DMA_64BIT +#define DMA_CR_OFFSET 1 +#else +#define DMA_CR_OFFSET 0 +#endif + +#define DMA_CE_ENABLE (1<<31) /* DMA Channel Enable */ +#define SET_DMA_CE_ENABLE(x) (((x)&0x1)<<31) +#define GET_DMA_CE_ENABLE(x) (((x)&DMA_CE_ENABLE)>>31) + +#define DMA_CIE_ENABLE (1<<30) /* DMA Channel Interrupt Enable */ +#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30) +#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30) + +#define DMA_TD (1<<29) +#define SET_DMA_TD(x) (((x)&0x1)<<29) +#define GET_DMA_TD(x) (((x)&DMA_TD)>>29) + +#define DMA_PL (1<<28) /* Peripheral Location */ +#define SET_DMA_PL(x) (((x)&0x1)<<28) +#define GET_DMA_PL(x) (((x)&DMA_PL)>>28) + +#define EXTERNAL_PERIPHERAL 0 +#define INTERNAL_PERIPHERAL 1 + +#define SET_DMA_PW(x) (((x)&0x7)<<(26-DMA_CR_OFFSET)) /* Peripheral Width */ +#define DMA_PW_MASK SET_DMA_PW(7) +#define PW_8 0 +#define PW_16 1 +#define PW_32 2 +#define PW_64 3 +#define PW_128 4 + + +#define GET_DMA_PW(x) (((x)&DMA_PW_MASK)>>(26-DMA_CR_OFFSET)) + +#define DMA_DAI (1<<(25-DMA_CR_OFFSET)) /* Destination Address Increment */ +#define SET_DMA_DAI(x) (((x)&0x1)<<(25-DMA_CR_OFFSET)) + +#define DMA_SAI (1<<(24-DMA_CR_OFFSET)) /* Source Address Increment */ +#define SET_DMA_SAI(x) (((x)&0x1)<<(24-DMA_CR_OFFSET)) + +#define DMA_BEN (1<<(23-DMA_CR_OFFSET)) /* Buffer Enable */ +#define SET_DMA_BEN(x) (((x)&0x1)<<(23-DMA_CR_OFFSET)) + +#define SET_DMA_TM(x) (((x)&0x3)<<(21-DMA_CR_OFFSET)) /* Transfer Mode */ +#define DMA_TM_MASK SET_DMA_TM(3) +#define TM_PERIPHERAL 0 /* Peripheral */ +#define TM_RESERVED 1 /* Reserved */ +#define TM_S_MM 2 /* Memory to Memory */ +#define TM_D_MM 3 /* Device Paced Memory to Memory */ +#define GET_DMA_TM(x) (((x)&DMA_TM_MASK)>>(21-DMA_CR_OFFSET)) + +#define SET_DMA_PSC(x) (((x)&0x3)<<(19-DMA_CR_OFFSET)) /* Peripheral Setup Cycles */ +#define DMA_PSC_MASK SET_DMA_PSC(3) +#define GET_DMA_PSC(x) (((x)&DMA_PSC_MASK)>>(19-DMA_CR_OFFSET)) + +#define SET_DMA_PWC(x) (((x)&0x3F)<<(13-DMA_CR_OFFSET)) /* Peripheral Wait Cycles */ +#define DMA_PWC_MASK SET_DMA_PWC(0x3F) +#define GET_DMA_PWC(x) (((x)&DMA_PWC_MASK)>>(13-DMA_CR_OFFSET)) + +#define SET_DMA_PHC(x) (((x)&0x7)<<(10-DMA_CR_OFFSET)) /* Peripheral Hold Cycles */ +#define DMA_PHC_MASK SET_DMA_PHC(0x7) +#define GET_DMA_PHC(x) (((x)&DMA_PHC_MASK)>>(10-DMA_CR_OFFSET)) + +#define DMA_ETD_OUTPUT 
(1<<(9-DMA_CR_OFFSET)) /* EOT pin is a TC output */ +#define SET_DMA_ETD(x) (((x)&0x1)<<(9-DMA_CR_OFFSET)) + +#define DMA_TCE_ENABLE (1<<(8-DMA_CR_OFFSET)) +#define SET_DMA_TCE(x) (((x)&0x1)<<(8-DMA_CR_OFFSET)) + +#define DMA_DEC (1<<(2)) /* Address Decrement */ +#define SET_DMA_DEC(x) (((x)&0x1)<<2) +#define GET_DMA_DEC(x) (((x)&DMA_DEC)>>2) + + +/* + * Transfer Modes + * These modes are defined in a way that makes it possible to + * simply "or" in the value in the control register. + */ + +#define DMA_MODE_MM (SET_DMA_TM(TM_S_MM)) /* memory to memory */ + + /* Device-paced memory to memory, */ + /* device is at source address */ +#define DMA_MODE_MM_DEVATSRC (DMA_TD | SET_DMA_TM(TM_D_MM)) + + /* Device-paced memory to memory, */ + /* device is at destination address */ +#define DMA_MODE_MM_DEVATDST (SET_DMA_TM(TM_D_MM)) + +#define SGL_LIST_SIZE 16384 +#define DMA_PPC4xx_SIZE SGL_LIST_SIZE + +#define SET_DMA_PRIORITY(x) (((x)&0x3)<<(6-DMA_CR_OFFSET)) /* DMA Channel Priority */ +#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3) +#define PRIORITY_LOW 0 +#define PRIORITY_MID_LOW 1 +#define PRIORITY_MID_HIGH 2 +#define PRIORITY_HIGH 3 +#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>(6-DMA_CR_OFFSET)) + + +#define SET_DMA_PREFETCH(x) (((x)&0x3)<<(4-DMA_CR_OFFSET)) /* Memory Read Prefetch */ +#define DMA_PREFETCH_MASK SET_DMA_PREFETCH(3) +#define PREFETCH_1 0 /* Prefetch 1 Double Word */ +#define PREFETCH_2 1 +#define PREFETCH_4 2 +#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>(4-DMA_CR_OFFSET)) + +#define DMA_PCE (1<<(3-DMA_CR_OFFSET)) /* Parity Check Enable */ +#define SET_DMA_PCE(x) (((x)&0x1)<<(3-DMA_CR_OFFSET)) +#define GET_DMA_PCE(x) (((x)&DMA_PCE)>>(3-DMA_CR_OFFSET)) + +/* + * DMA Polarity Configuration Register + */ +#define DMAReq_ActiveLow(chan) (1<<(31-(chan*3))) +#define DMAAck_ActiveLow(chan) (1<<(30-(chan*3))) +#define EOT_ActiveLow(chan) (1<<(29-(chan*3))) /* End of Transfer */ + +/* + * DMA Sleep Mode Register + */ +#define SLEEP_MODE_ENABLE (1<<21) + +/* + * DMA Status Register + */ +#define DMA_CS0 (1<<31) /* Terminal Count has been reached */ +#define DMA_CS1 (1<<30) +#define DMA_CS2 (1<<29) +#define DMA_CS3 (1<<28) + +#define DMA_TS0 (1<<27) /* End of Transfer has been requested */ +#define DMA_TS1 (1<<26) +#define DMA_TS2 (1<<25) +#define DMA_TS3 (1<<24) + +#define DMA_CH0_ERR (1<<23) /* DMA Chanel 0 Error */ +#define DMA_CH1_ERR (1<<22) +#define DMA_CH2_ERR (1<<21) +#define DMA_CH3_ERR (1<<20) + +#define DMA_IN_DMA_REQ0 (1<<19) /* Internal DMA Request is pending */ +#define DMA_IN_DMA_REQ1 (1<<18) +#define DMA_IN_DMA_REQ2 (1<<17) +#define DMA_IN_DMA_REQ3 (1<<16) + +#define DMA_EXT_DMA_REQ0 (1<<15) /* External DMA Request is pending */ +#define DMA_EXT_DMA_REQ1 (1<<14) +#define DMA_EXT_DMA_REQ2 (1<<13) +#define DMA_EXT_DMA_REQ3 (1<<12) + +#define DMA_CH0_BUSY (1<<11) /* DMA Channel 0 Busy */ +#define DMA_CH1_BUSY (1<<10) +#define DMA_CH2_BUSY (1<<9) +#define DMA_CH3_BUSY (1<<8) + +#define DMA_SG0 (1<<7) /* DMA Channel 0 Scatter/Gather in progress */ +#define DMA_SG1 (1<<6) +#define DMA_SG2 (1<<5) +#define DMA_SG3 (1<<4) + +/* DMA Channel Count Register */ +#define DMA_CTC_TCIE (1<<29) /* Terminal Count Interrupt Enable */ +#define DMA_CTC_ETIE (1<<28) /* EOT Interupt Enable */ +#define DMA_CTC_EIE (1<<27) /* Error Interrupt Enable */ +#define DMA_CTC_BTEN (1<<23) /* Burst Enable/Disable bit */ +#define DMA_CTC_BSIZ_MSK (3<<21) /* Mask of the Burst size bits */ +#define DMA_CTC_BSIZ_2 (0) +#define DMA_CTC_BSIZ_4 (1<<21) +#define DMA_CTC_BSIZ_8 (2<<21) +#define 
DMA_CTC_BSIZ_16 (3<<21) +#define DMA_CTC_TC_MASK 0xFFFFF + +/* + * DMA SG Command Register + */ +#define SSG_ENABLE(chan) (1<<(31-chan)) /* Start Scatter Gather */ +#define SSG_MASK_ENABLE(chan) (1<<(15-chan)) /* Enable writing to SSG0 bit */ + + +/* + * DMA Scatter/Gather Descriptor Bit fields + */ +#define SG_LINK (1<<31) /* Link */ +#define SG_TCI_ENABLE (1<<29) /* Enable Terminal Count Interrupt */ +#define SG_ETI_ENABLE (1<<28) /* Enable End of Transfer Interrupt */ +#define SG_ERI_ENABLE (1<<27) /* Enable Error Interrupt */ +#define SG_COUNT_MASK 0xFFFF /* Count Field */ + +#define SET_DMA_CONTROL \ + (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \ + SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */\ + SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \ + SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \ + SET_DMA_PL(p_init->pl) | /* peripheral location */ \ + SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \ + SET_DMA_SAI(p_init->sai) | /* src addr increment */ \ + SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \ + SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \ + SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \ + SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \ + SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \ + SET_DMA_PREFETCH(p_init->pf) /* read prefetch */) + +#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan)) + + +/** + * struct ppc460ex_dma_device - internal representation of an DMA device + * @pdev: Platform device + * @id: HW DMA Device selector + * @dma_desc_pool: base of DMA descriptor region (DMA address) + * @dma_desc_pool_virt: base of DMA descriptor region (CPU address) + * @common: embedded struct dma_device + */ +typedef struct ppc460ex_plb_dma_device { + //struct platform_device *pdev; + void __iomem *reg_base; + struct device *dev; + struct resource reg; /* Resource for register */ + int id; + struct ppc460ex_plb_dma_chan *chan[MAX_PPC460EX_DMA_CHANNELS]; + wait_queue_head_t queue; +} ppc460ex_plb_dma_dev_t; + +typedef uint32_t sgl_handle_t; +/** + * struct ppc460ex_dma_chan - internal representation of an ADMA channel + * @lock: serializes enqueue/dequeue operations to the slot pool + * @device: parent device + * @chain: device chain view of the descriptors + * @common: common dmaengine channel object members + * @all_slots: complete domain of slots usable by the channel + * @reg: Resource for register + * @pending: allows batching of hardware operations + * @completed_cookie: identifier for the most recently completed operation + * @slots_allocated: records the actual size of the descriptor slot pool + * @hw_chain_inited: h/w descriptor chain initialization flag + * @irq_tasklet: bottom half where ppc460ex_adma_slot_cleanup runs + * @needs_unmap: if buffers should not be unmapped upon final processing + */ +typedef struct ppc460ex_plb_dma_chan { + void __iomem *reg_base; + struct ppc460ex_plb_dma_device *device; + struct timer_list cleanup_watchdog; + struct resource reg; /* Resource for register */ + unsigned int chan_id; + struct tasklet_struct irq_tasklet; + sgl_handle_t *phandle; + unsigned short in_use; /* set when channel is being used, clr when + * available. 
+ */ + /* + * Valid polarity settings: + * DMAReq_ActiveLow(n) + * DMAAck_ActiveLow(n) + * EOT_ActiveLow(n) + * + * n is 0 to max dma chans + */ + unsigned int polarity; + + char buffer_enable; /* Boolean: buffer enable */ + char tce_enable; /* Boolean: terminal count enable */ + char etd_output; /* Boolean: eot pin is a tc output */ + char pce; /* Boolean: parity check enable */ + + /* + * Peripheral location: + * INTERNAL_PERIPHERAL (UART0 on the 405GP) + * EXTERNAL_PERIPHERAL + */ + char pl; /* internal/external peripheral */ + + /* + * Valid pwidth settings: + * PW_8 + * PW_16 + * PW_32 + * PW_64 + */ + unsigned int pwidth; + + char dai; /* Boolean: dst address increment */ + char sai; /* Boolean: src address increment */ + + /* + * Valid psc settings: 0-3 + */ + unsigned int psc; /* Peripheral Setup Cycles */ + + /* + * Valid pwc settings: + * 0-63 + */ + unsigned int pwc; /* Peripheral Wait Cycles */ + + /* + * Valid phc settings: + * 0-7 + */ + unsigned int phc; /* Peripheral Hold Cycles */ + + /* + * Valid cp (channel priority) settings: + * PRIORITY_LOW + * PRIORITY_MID_LOW + * PRIORITY_MID_HIGH + * PRIORITY_HIGH + */ + unsigned int cp; /* channel priority */ + + /* + * Valid pf (memory read prefetch) settings: + * + * PREFETCH_1 + * PREFETCH_2 + * PREFETCH_4 + */ + unsigned int pf; /* memory read prefetch */ + + /* + * Boolean: channel interrupt enable + * NOTE: for sgl transfers, only the last descriptor will be setup to + * interrupt. + */ + char int_enable; + + char shift; /* easy access to byte_count shift, based on */ + /* the width of the channel */ + + uint32_t control; /* channel control word */ + + /* These variabled are used ONLY in single dma transfers */ + unsigned int mode; /* transfer mode */ + phys_addr_t addr; + char ce; /* channel enable */ + char int_on_final_sg;/* for scatter/gather - only interrupt on last sg */ + +} ppc460ex_plb_dma_ch_t; + +/* + * PPC44x DMA implementations have a slightly different + * descriptor layout. Probably moved about due to the + * change to 64-bit addresses and link pointer. I don't + * know why they didn't just leave control_count after + * the dst_addr. 
+ */ +#ifdef PPC4xx_DMA_64BIT +typedef struct { + uint32_t control; + uint32_t control_count; + phys_addr_t src_addr; + phys_addr_t dst_addr; + phys_addr_t next; +} ppc_sgl_t; +#else +typedef struct { + uint32_t control; + phys_addr_t src_addr; + phys_addr_t dst_addr; + uint32_t control_count; + uint32_t next; +} ppc_sgl_t; +#endif + + + +typedef struct { + unsigned int ch_id; + uint32_t control; /* channel ctrl word; loaded from each descrptr */ + uint32_t sgl_control; /* LK, TCI, ETI, and ERI bits in sgl descriptor */ + dma_addr_t dma_addr; /* dma (physical) address of this list */ + dma_addr_t dummy; /*Dummy variable to allow quad word alignment*/ + ppc_sgl_t *phead; + dma_addr_t phead_dma; + ppc_sgl_t *ptail; + dma_addr_t ptail_dma; +} sgl_list_info_t; + +typedef struct { + phys_addr_t *src_addr; + phys_addr_t *dst_addr; + phys_addr_t dma_src_addr; + phys_addr_t dma_dst_addr; +} pci_alloc_desc_t; + +#define PPC460EX_DMA_SGXFR_COMPLETE(id) (!((1 << (11-id)) & mfdcr(DCR_DMA2P40_SR))) +#define PPC460EX_DMA_CHAN_BUSY(id) ( (1 << (11-id)) & mfdcr(DCR_DMA2P40_SR) ) +#define DMA_STATUS(id) (mfdcr(DCR_DMA2P40_SR)) +#define CLEAR_DMA_STATUS(id) (mtdcr(DCR_DMA2P40_SR, 0xFFFFFFFF)) +#define PPC460EX_DMA_SGSTAT_FREE(id) (!((1 << (7-id)) & mfdcr(DCR_DMA2P40_SR)) ) +#define PPC460EX_DMA_TC_REACHED(id) ( (1 << (31-id)) & mfdcr(DCR_DMA2P40_SR) ) +#define PPC460EX_DMA_CHAN_XFR_COMPLETE(id) ( (!PPC460EX_DMA_CHAN_BUSY(id)) && (PPC460EX_DMA_TC_REACHED(id)) ) +#define PPC460EX_DMA_CHAN_SGXFR_COMPLETE(id) ( (!PPC460EX_DMA_CHAN_BUSY(id)) && PPC460EX_DMA_SGSTAT_FREE(id) ) +#define PPC460EX_DMA_SG_IN_PROGRESS(id) ( (1 << (7-id)) | (1 << (11-id)) ) +#define PPC460EX_DMA_SG_OP_COMPLETE(id) ( (PPC460EX_DMA_SG_IN_PROGRESS(id) & DMA_STATUS(id) ) == 0) + +extern ppc460ex_plb_dma_dev_t *adev; +int ppc460ex_init_dma_channel(ppc460ex_plb_dma_dev_t *adev, + unsigned int ch_id, + ppc460ex_plb_dma_ch_t *p_init); + +int ppc460ex_set_src_addr(int ch_id, phys_addr_t src_addr); + +int ppc460ex_set_dst_addr(int ch_id, phys_addr_t dst_addr); + +int ppc460ex_set_dma_mode(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int mode); + +void ppc460ex_set_dma_count(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int count); + +int ppc460ex_enable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id); + +int ppc460ex_enable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id); + +int ppc460ex_get_dma_channel(void); + +void ppc460ex_disable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id); + +int ppc460ex_clear_dma_status(unsigned int ch_id); + +#if 0 +extern int test_dma_memcpy(void *src, void *dst, unsigned int length, unsigned int dma_ch); + +extern int test_sgdma_memcpy(void *src, void *dst, void *src1, void *dst1, + unsigned int length, unsigned int dma_ch); +#endif |
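
As a rough usage sketch of the exported PLB DMA helpers declared above (and not part of this patch): a kernel-side caller could drive a single memory-to-memory transfer along the lines of the #if 0 ppc460ex_dma_memcpy_self_test() in ppc460ex_4chan_dma.c. The helper below is hypothetical; it assumes the driver's probe path has already populated the exported 'adev' pointer, that 'dev' is a DMA-capable struct device, and that 'ch' is an otherwise idle channel (0..3).

/* Hypothetical usage sketch -- illustrative only, not part of this patch. */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/dcr.h>
#include "ppc460ex_4chan_dma.h"

static int example_plb_memcpy(struct device *dev, unsigned int ch,
			      void *dst, void *src, size_t len)
{
	ppc460ex_plb_dma_ch_t init = { 0 };
	dma_addr_t dma_src, dma_dst;
	int timeout = 1000;
	int rc;

	dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

	/* Note: ppc460ex_init_dma_channel() currently overrides most of
	 * these fields internally (it forces PW_8, sai = dai = 1, etc.),
	 * but the self-test fills them in the same way. */
	init.polarity = 0;
	init.pwidth = PW_32;
	init.sai = 1;		/* increment source address */
	init.dai = 1;		/* increment destination address */
	rc = ppc460ex_init_dma_channel(adev, ch, &init);
	if (rc != DMA_STATUS_GOOD) {
		rc = -EIO;
		goto unmap;
	}

	ppc460ex_clear_dma_status(ch);
	ppc460ex_set_src_addr(ch, dma_src);
	ppc460ex_set_dst_addr(ch, dma_dst);
	ppc460ex_set_dma_mode(adev, ch, DMA_MODE_MM);
	ppc460ex_set_dma_count(adev, ch, len);
	ppc460ex_enable_dma_interrupt(adev, ch);

	if (ppc460ex_enable_dma(adev, ch)) {
		rc = -EIO;
		goto unmap;
	}

	/* Busy-wait for terminal count; a real caller would instead sleep
	 * until ppc460ex_4chan_dma_eot_handler() signals completion. */
	while (!PPC460EX_DMA_CHAN_XFR_COMPLETE(ch) && --timeout)
		udelay(10);

	ppc460ex_disable_dma(adev, ch);
	if (!timeout)
		rc = -ETIMEDOUT;
	else
		rc = ppc460ex_get_dma_residue(adev, ch) ? -EIO : 0;

unmap:
	dma_unmap_single(dev, dma_src, len, DMA_TO_DEVICE);
	dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
	return rc;
}

Note that the disabled self-test in ppc460ex_4chan_dma.c follows the same sequence but compares the buffers immediately after ppc460ex_enable_dma() without waiting for the transfer to complete; the wait (or an interrupt-driven completion) is what a real caller would add.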